-rw-r--r--.coveragerc5
-rw-r--r--.github/ISSUE_TEMPLATE.md63
-rw-r--r--.gitignore6
-rw-r--r--.pylintrc (renamed from git/.pylintrc)173
-rw-r--r--.tito/packages/openshift-ansible2
-rw-r--r--.tito/releasers.conf6
-rw-r--r--.travis.yml8
-rw-r--r--.yamllint (renamed from git/.yamllint)0
-rw-r--r--CONTRIBUTING.md49
-rw-r--r--HOOKS.md70
-rw-r--r--README.md64
-rw-r--r--README_GCE.md3
-rw-r--r--bin/README.md6
-rw-r--r--callback_plugins/openshift_quick_installer.py28
-rw-r--r--filter_plugins/oo_filters.py1799
-rw-r--r--filter_plugins/oo_zabbix_filters.py160
-rw-r--r--filter_plugins/openshift_master.py45
-rwxr-xr-xgit/parent.py97
-rwxr-xr-xgit/pylint.sh51
-rwxr-xr-xgit/yaml_validation.py73
-rw-r--r--inventory/README.md2
-rw-r--r--inventory/aws/hosts/ec2.ini54
-rwxr-xr-xinventory/aws/hosts/ec2.py285
-rw-r--r--inventory/byo/hosts.origin.example57
-rw-r--r--inventory/byo/hosts.ose.example57
-rwxr-xr-xinventory/gce/hosts/gce.py252
-rwxr-xr-xinventory/libvirt/hosts/libvirt_generic.py10
-rwxr-xr-xlibrary/modify_yaml.py53
-rw-r--r--openshift-ansible.spec351
-rw-r--r--playbooks/README.md19
-rw-r--r--playbooks/adhoc/README.md5
-rw-r--r--playbooks/adhoc/grow_docker_vg/filter_plugins/grow_docker_vg_filters.py (renamed from playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py)11
-rw-r--r--playbooks/adhoc/noc/create_host.yml58
-rw-r--r--playbooks/adhoc/noc/create_maintenance.yml37
-rw-r--r--playbooks/adhoc/noc/get_zabbix_problems.yml43
-rw-r--r--playbooks/adhoc/uninstall.yml274
-rw-r--r--playbooks/adhoc/zabbix_setup/clean_zabbix.yml60
l---------playbooks/adhoc/zabbix_setup/filter_plugins1
-rwxr-xr-xplaybooks/adhoc/zabbix_setup/oo-clean-zaio.yml7
-rwxr-xr-xplaybooks/adhoc/zabbix_setup/oo-config-zaio.yml19
l---------playbooks/adhoc/zabbix_setup/roles1
-rw-r--r--playbooks/aws/README.md4
-rw-r--r--playbooks/byo/README.md11
-rw-r--r--playbooks/byo/openshift-cluster/openshift-logging.yml35
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml8
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml2
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml4
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml4
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml10
-rw-r--r--playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml6
-rw-r--r--playbooks/byo/openshift-master/restart.yml14
-rw-r--r--playbooks/byo/openshift-preflight/README.md43
-rw-r--r--playbooks/byo/openshift-preflight/check.yml31
-rw-r--r--playbooks/common/README.md9
-rw-r--r--playbooks/common/openshift-cluster/config.yml3
-rw-r--r--playbooks/common/openshift-cluster/openshift_hosted.yml23
-rw-r--r--playbooks/common/openshift-cluster/openshift_logging.yml5
-rw-r--r--playbooks/common/openshift-cluster/openshift_metrics.yml5
-rw-r--r--playbooks/common/openshift-cluster/redeploy-certificates.yml12
-rw-r--r--playbooks/common/openshift-cluster/upgrades/docker/restart.yml12
-rw-r--r--playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml2
-rw-r--r--playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml6
-rw-r--r--playbooks/common/openshift-cluster/upgrades/etcd/backup.yml32
-rw-r--r--playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml3
-rw-r--r--playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml2
-rw-r--r--playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check193
-rw-r--r--playbooks/common/openshift-cluster/upgrades/files/rpm_versions.sh12
-rw-r--r--playbooks/common/openshift-cluster/upgrades/init.yml17
-rwxr-xr-xplaybooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py2
-rw-r--r--playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml4
-rw-r--r--playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml4
-rw-r--r--playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml66
-rw-r--r--playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml6
-rw-r--r--playbooks/common/openshift-master/config.yml61
-rw-r--r--playbooks/common/openshift-master/restart_hosts.yml17
-rw-r--r--playbooks/common/openshift-master/restart_services.yml18
-rw-r--r--playbooks/common/openshift-master/validate_restart.yml (renamed from playbooks/common/openshift-master/restart.yml)13
-rw-r--r--playbooks/common/openshift-node/config.yml48
-rw-r--r--playbooks/gce/README.md4
-rw-r--r--playbooks/libvirt/README.md4
-rw-r--r--playbooks/openstack/README.md4
-rw-r--r--requirements.txt4
-rw-r--r--roles/docker/meta/main.yml1
-rw-r--r--roles/docker/tasks/main.yml30
-rw-r--r--roles/kube_nfs_volumes/library/partitionpool.py8
-rw-r--r--roles/lib_openshift/library/oc_edit.py1377
-rw-r--r--roles/lib_openshift/library/oc_obj.py1444
-rw-r--r--roles/lib_openshift/library/oc_route.py1614
-rw-r--r--roles/lib_openshift/library/oc_version.py1232
-rw-r--r--roles/lib_openshift/src/ansible/oc_edit.py48
-rw-r--r--roles/lib_openshift/src/ansible/oc_obj.py37
-rw-r--r--roles/lib_openshift/src/ansible/oc_route.py84
-rw-r--r--roles/lib_openshift/src/ansible/oc_version.py26
-rw-r--r--roles/lib_openshift/src/class/oc_edit.py94
-rw-r--r--roles/lib_openshift/src/class/oc_obj.py193
-rw-r--r--roles/lib_openshift/src/class/oc_route.py170
-rw-r--r--roles/lib_openshift/src/class/oc_version.py47
-rw-r--r--roles/lib_openshift/src/doc/edit116
-rw-r--r--roles/lib_openshift/src/doc/generated10
-rw-r--r--roles/lib_openshift/src/doc/license16
-rw-r--r--roles/lib_openshift/src/doc/obj95
-rw-r--r--roles/lib_openshift/src/doc/route120
-rw-r--r--roles/lib_openshift/src/doc/version40
-rwxr-xr-xroles/lib_openshift/src/generate.py75
-rw-r--r--roles/lib_openshift/src/lib/base.py522
-rw-r--r--roles/lib_openshift/src/lib/import.py17
-rw-r--r--roles/lib_openshift/src/lib/route.py123
-rw-r--r--roles/lib_openshift/src/sources.yml38
-rwxr-xr-xroles/lib_openshift/src/test/integration/oc_route.yml77
-rwxr-xr-xroles/lib_openshift/src/test/integration/oc_version.yml17
-rwxr-xr-xroles/lib_openshift/src/test/unit/oc_version.py70
-rw-r--r--roles/lib_utils/library/yedit.py774
-rw-r--r--roles/lib_utils/src/ansible/yedit.py42
-rw-r--r--roles/lib_utils/src/class/import.py11
-rw-r--r--roles/lib_utils/src/class/yedit.py566
-rw-r--r--roles/lib_utils/src/doc/generated9
-rw-r--r--roles/lib_utils/src/doc/license16
-rw-r--r--roles/lib_utils/src/doc/yedit138
-rwxr-xr-xroles/lib_utils/src/generate.py75
-rw-r--r--roles/lib_utils/src/sources.yml8
-rw-r--r--roles/lib_utils/src/test/integration/files/kube-manager.yaml39
-rw-r--r--roles/lib_utils/src/test/integration/kube-manager-test.yaml.orig52
-rwxr-xr-xroles/lib_utils/src/test/integration/yedit_test.yml221
-rwxr-xr-xroles/lib_utils/src/test/unit/yedit_test.py277
-rw-r--r--roles/openshift_builddefaults/tasks/main.yml1
-rw-r--r--roles/openshift_builddefaults/vars/main.yml15
-rw-r--r--roles/openshift_buildoverrides/meta/main.yml15
-rw-r--r--roles/openshift_buildoverrides/tasks/main.yml6
-rw-r--r--roles/openshift_buildoverrides/vars/main.yml11
-rw-r--r--roles/openshift_ca/tasks/main.yml2
-rw-r--r--roles/openshift_certificate_expiry/README.md326
-rw-r--r--roles/openshift_certificate_expiry/examples/cert-expiry-report-html.png  bin 0 -> 189466 bytes
-rw-r--r--roles/openshift_certificate_expiry/examples/cert-expiry-report.html396
-rw-r--r--roles/openshift_certificate_expiry/examples/cert-expiry-report.json178
-rw-r--r--roles/openshift_certificate_expiry/examples/playbooks/default.yaml10
-rw-r--r--roles/openshift_certificate_expiry/examples/playbooks/easy-mode.yaml21
-rw-r--r--roles/openshift_certificate_expiry/examples/playbooks/html_and_json_default_paths.yaml12
-rw-r--r--roles/openshift_certificate_expiry/examples/playbooks/longer-warning-period-json-results.yaml13
-rw-r--r--roles/openshift_certificate_expiry/examples/playbooks/longer_warning_period.yaml12
-rw-r--r--roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py4
-rw-r--r--roles/openshift_certificate_expiry/library/openshift_cert_expiry.py115
-rw-r--r--roles/openshift_certificate_expiry/templates/cert-expiry-table.html.j229
-rw-r--r--roles/openshift_docker_facts/tasks/main.yml2
-rwxr-xr-xroles/openshift_examples/examples-sync.sh15
l---------roles/openshift_examples/files/examples/latest2
-rw-r--r--roles/openshift_examples/files/examples/v1.3/db-templates/mariadb-ephemeral-template.json43
-rw-r--r--roles/openshift_examples/files/examples/v1.3/db-templates/mariadb-persistent-template.json43
-rw-r--r--roles/openshift_examples/files/examples/v1.3/db-templates/mongodb-ephemeral-template.json39
-rw-r--r--roles/openshift_examples/files/examples/v1.3/db-templates/mongodb-persistent-template.json39
-rw-r--r--roles/openshift_examples/files/examples/v1.3/db-templates/mysql-ephemeral-template.json51
-rw-r--r--roles/openshift_examples/files/examples/v1.3/db-templates/mysql-persistent-template.json51
-rw-r--r--roles/openshift_examples/files/examples/v1.3/db-templates/postgresql-ephemeral-template.json25
-rw-r--r--roles/openshift_examples/files/examples/v1.3/db-templates/postgresql-persistent-template.json25
-rw-r--r--roles/openshift_examples/files/examples/v1.3/image-streams/image-streams-centos7.json108
-rw-r--r--roles/openshift_examples/files/examples/v1.3/image-streams/image-streams-rhel7.json106
-rw-r--r--roles/openshift_examples/files/examples/v1.3/quickstart-templates/README.md3
-rw-r--r--roles/openshift_examples/files/examples/v1.3/quickstart-templates/apicast-gateway-template.yml149
-rw-r--r--roles/openshift_examples/files/examples/v1.3/quickstart-templates/cakephp-mysql.json43
-rw-r--r--roles/openshift_examples/files/examples/v1.3/quickstart-templates/dancer-mysql.json56
-rw-r--r--roles/openshift_examples/files/examples/v1.3/quickstart-templates/django-postgresql.json39
-rw-r--r--roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkins-ephemeral-template.json16
-rw-r--r--roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkins-persistent-template.json16
-rw-r--r--roles/openshift_examples/files/examples/v1.3/quickstart-templates/nodejs-mongodb.json91
-rw-r--r--roles/openshift_examples/files/examples/v1.3/quickstart-templates/rails-postgresql.json49
-rw-r--r--roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-pv-app-example.yaml13
-rw-r--r--roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-pv-example.yaml13
-rw-r--r--roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-template.yaml479
-rw-r--r--roles/openshift_examples/files/examples/v1.4/db-templates/mariadb-ephemeral-template.json43
-rw-r--r--roles/openshift_examples/files/examples/v1.4/db-templates/mariadb-persistent-template.json43
-rw-r--r--roles/openshift_examples/files/examples/v1.4/db-templates/mongodb-ephemeral-template.json39
-rw-r--r--roles/openshift_examples/files/examples/v1.4/db-templates/mongodb-persistent-template.json39
-rw-r--r--roles/openshift_examples/files/examples/v1.4/db-templates/mysql-ephemeral-template.json51
-rw-r--r--roles/openshift_examples/files/examples/v1.4/db-templates/mysql-persistent-template.json51
-rw-r--r--roles/openshift_examples/files/examples/v1.4/db-templates/postgresql-ephemeral-template.json25
-rw-r--r--roles/openshift_examples/files/examples/v1.4/db-templates/postgresql-persistent-template.json25
-rw-r--r--roles/openshift_examples/files/examples/v1.4/db-templates/redis-ephemeral-template.json191
-rw-r--r--roles/openshift_examples/files/examples/v1.4/db-templates/redis-persistent-template.json215
-rw-r--r--roles/openshift_examples/files/examples/v1.4/image-streams/image-streams-centos7.json108
-rw-r--r--roles/openshift_examples/files/examples/v1.4/image-streams/image-streams-rhel7.json106
-rw-r--r--roles/openshift_examples/files/examples/v1.4/quickstart-templates/README.md3
-rw-r--r--roles/openshift_examples/files/examples/v1.4/quickstart-templates/apicast-gateway-template.yml149
-rw-r--r--roles/openshift_examples/files/examples/v1.4/quickstart-templates/cakephp-mysql.json43
-rw-r--r--roles/openshift_examples/files/examples/v1.4/quickstart-templates/dancer-mysql.json56
-rw-r--r--roles/openshift_examples/files/examples/v1.4/quickstart-templates/django-postgresql.json39
-rw-r--r--roles/openshift_examples/files/examples/v1.4/quickstart-templates/jenkins-ephemeral-template.json2
-rw-r--r--roles/openshift_examples/files/examples/v1.4/quickstart-templates/jenkins-persistent-template.json2
-rw-r--r--roles/openshift_examples/files/examples/v1.4/quickstart-templates/nodejs-mongodb.json91
-rw-r--r--roles/openshift_examples/files/examples/v1.4/quickstart-templates/rails-postgresql.json49
-rw-r--r--roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-pv-app-example.yaml13
-rw-r--r--roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-pv-example.yaml13
-rw-r--r--roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-template.yaml479
-rw-r--r--roles/openshift_examples/files/examples/v1.5/db-templates/README.md76
-rw-r--r--roles/openshift_examples/files/examples/v1.5/db-templates/mariadb-ephemeral-template.json225
-rw-r--r--roles/openshift_examples/files/examples/v1.5/db-templates/mariadb-persistent-template.json249
-rw-r--r--roles/openshift_examples/files/examples/v1.5/db-templates/mongodb-ephemeral-template.json253
-rw-r--r--roles/openshift_examples/files/examples/v1.5/db-templates/mongodb-persistent-template.json277
-rw-r--r--roles/openshift_examples/files/examples/v1.5/db-templates/mysql-ephemeral-template.json253
-rw-r--r--roles/openshift_examples/files/examples/v1.5/db-templates/mysql-persistent-template.json256
-rw-r--r--roles/openshift_examples/files/examples/v1.5/db-templates/postgresql-ephemeral-template.json235
-rw-r--r--roles/openshift_examples/files/examples/v1.5/db-templates/postgresql-persistent-template.json259
-rw-r--r--roles/openshift_examples/files/examples/v1.5/db-templates/redis-ephemeral-template.json191
-rw-r--r--roles/openshift_examples/files/examples/v1.5/db-templates/redis-persistent-template.json215
-rw-r--r--roles/openshift_examples/files/examples/v1.5/image-streams/dotnet_imagestreams.json76
-rw-r--r--roles/openshift_examples/files/examples/v1.5/image-streams/image-streams-centos7.json829
-rw-r--r--roles/openshift_examples/files/examples/v1.5/image-streams/image-streams-rhel7.json736
-rw-r--r--roles/openshift_examples/files/examples/v1.5/quickstart-templates/README.md22
-rw-r--r--roles/openshift_examples/files/examples/v1.5/quickstart-templates/apicast-gateway-template.yml149
-rw-r--r--roles/openshift_examples/files/examples/v1.5/quickstart-templates/cakephp-mysql.json531
-rw-r--r--roles/openshift_examples/files/examples/v1.5/quickstart-templates/dancer-mysql.json487
-rw-r--r--roles/openshift_examples/files/examples/v1.5/quickstart-templates/django-postgresql.json500
-rw-r--r--roles/openshift_examples/files/examples/v1.5/quickstart-templates/jenkins-ephemeral-template.json275
-rw-r--r--roles/openshift_examples/files/examples/v1.5/quickstart-templates/jenkins-persistent-template.json299
-rw-r--r--roles/openshift_examples/files/examples/v1.5/quickstart-templates/nodejs-mongodb.json517
-rw-r--r--roles/openshift_examples/files/examples/v1.5/quickstart-templates/rails-postgresql.json562
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-streams/fis-image-streams.json56
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-streams/jboss-image-streams.json372
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/amq62-basic.json321
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/amq62-persistent-ssl.json549
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/amq62-persistent.json371
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/amq62-ssl.json503
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/datagrid65-basic.json332
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/datagrid65-https.json501
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/datagrid65-mysql-persistent.json779
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/datagrid65-mysql.json739
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/datagrid65-postgresql-persistent.json756
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/datagrid65-postgresql.json716
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/datavirt63-basic-s2i.json415
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/datavirt63-extensions-support-s2i.json763
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/datavirt63-secure-s2i.json642
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver62-amq-s2i.json686
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver62-basic-s2i.json339
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver62-https-s2i.json473
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver63-amq-s2i.json696
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver63-basic-s2i.json339
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver63-https-s2i.json473
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-amq-persistent-s2i.json813
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-amq-s2i.json760
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-basic-s2i.json340
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-https-s2i.json525
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-mongodb-persistent-s2i.json781
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-mongodb-s2i.json741
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-mysql-persistent-s2i.json792
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-mysql-s2i.json752
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-postgresql-persistent-s2i.json769
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-postgresql-s2i.json729
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-sso-s2i.json756
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-amq-persistent-s2i.json813
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-amq-s2i.json760
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-basic-s2i.json351
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-https-s2i.json536
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-mongodb-persistent-s2i.json792
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-mongodb-s2i.json752
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-mysql-persistent-s2i.json807
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-mysql-s2i.json767
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-postgresql-persistent-s2i.json784
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-postgresql-s2i.json744
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-sso-s2i.json767
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-basic-s2i.json284
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-https-s2i.json398
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json654
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-mongodb-s2i.json614
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json656
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-mysql-s2i.json616
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json633
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-postgresql-s2i.json593
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-basic-s2i.json284
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-https-s2i.json398
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json654
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-mongodb-s2i.json614
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json656
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-mysql-s2i.json616
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json633
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-postgresql-s2i.json591
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-amq-mysql-persistent-s2i.json1079
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-amq-mysql-s2i.json959
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-amq-postgresql-persistent-s2i.json1052
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-amq-postgresql-s2i.json932
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-basic-s2i.json345
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-mysql-persistent-s2i.json792
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-mysql-s2i.json716
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-postgresql-persistent-s2i.json765
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-postgresql-s2i.json689
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso70-https.json514
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso70-mysql-persistent.json750
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso70-mysql.json719
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso70-postgresql-persistent.json727
-rw-r--r--roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso70-postgresql.json696
-rwxr-xr-xroles/openshift_facts/library/openshift_facts.py178
-rw-r--r--roles/openshift_facts/tasks/main.yml6
-rw-r--r--roles/openshift_facts/vars/main.yml7
-rw-r--r--roles/openshift_hosted/meta/main.yml21
-rw-r--r--roles/openshift_hosted/tasks/registry/secure.yml10
-rw-r--r--roles/openshift_hosted_metrics/README.md54
-rw-r--r--roles/openshift_hosted_metrics/defaults/main.yml (renamed from roles/openshift_metrics/defaults/main.yml)0
-rw-r--r--roles/openshift_hosted_metrics/handlers/main.yml (renamed from roles/openshift_metrics/handlers/main.yml)0
-rw-r--r--roles/openshift_hosted_metrics/meta/main.yaml18
-rw-r--r--roles/openshift_hosted_metrics/tasks/install.yml (renamed from roles/openshift_metrics/tasks/install.yml)22
-rw-r--r--roles/openshift_hosted_metrics/tasks/main.yaml75
-rw-r--r--roles/openshift_hosted_metrics/vars/main.yaml21
-rw-r--r--roles/openshift_hosted_templates/files/v1.3/enterprise/logging-deployer.yaml2
-rw-r--r--roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml3
-rw-r--r--roles/openshift_hosted_templates/files/v1.5/enterprise/logging-deployer.yaml342
-rw-r--r--roles/openshift_hosted_templates/files/v1.5/enterprise/metrics-deployer.yaml168
-rw-r--r--roles/openshift_hosted_templates/files/v1.5/enterprise/registry-console.yaml124
-rw-r--r--roles/openshift_hosted_templates/files/v1.5/origin/logging-deployer.yaml345
-rw-r--r--roles/openshift_hosted_templates/files/v1.5/origin/metrics-deployer.yaml168
-rw-r--r--roles/openshift_hosted_templates/files/v1.5/origin/registry-console.yaml124
-rw-r--r--roles/openshift_loadbalancer/defaults/main.yml2
-rw-r--r--roles/openshift_loadbalancer/tasks/main.yml25
-rw-r--r--roles/openshift_loadbalancer/templates/haproxy.docker.service.j217
-rw-r--r--roles/openshift_logging/README.md91
-rw-r--r--roles/openshift_logging/defaults/main.yml85
-rw-r--r--roles/openshift_logging/files/curator.yml18
-rw-r--r--roles/openshift_logging/files/elasticsearch-logging.yml72
-rw-r--r--roles/openshift_logging/files/es_migration.sh79
-rw-r--r--roles/openshift_logging/files/fluent.conf34
-rw-r--r--roles/openshift_logging/files/fluentd-throttle-config.yaml7
-rw-r--r--roles/openshift_logging/files/generate-jks.sh178
-rw-r--r--roles/openshift_logging/files/logging-deployer-sa.yaml6
-rw-r--r--roles/openshift_logging/files/secure-forward.conf24
-rw-r--r--roles/openshift_logging/files/server-tls.json5
-rw-r--r--roles/openshift_logging/filter_plugins/openshift_logging.py38
-rw-r--r--roles/openshift_logging/library/openshift_logging_facts.py340
-rw-r--r--roles/openshift_logging/meta/main.yaml15
-rw-r--r--roles/openshift_logging/tasks/delete_logging.yaml114
-rw-r--r--roles/openshift_logging/tasks/generate_certs.yaml143
-rw-r--r--roles/openshift_logging/tasks/generate_clusterrolebindings.yaml13
-rw-r--r--roles/openshift_logging/tasks/generate_clusterroles.yaml11
-rw-r--r--roles/openshift_logging/tasks/generate_configmaps.yaml117
-rw-r--r--roles/openshift_logging/tasks/generate_deploymentconfigs.yaml65
-rw-r--r--roles/openshift_logging/tasks/generate_jks.yaml111
-rw-r--r--roles/openshift_logging/tasks/generate_pems.yaml36
-rw-r--r--roles/openshift_logging/tasks/generate_pvcs.yaml49
-rw-r--r--roles/openshift_logging/tasks/generate_rolebindings.yaml12
-rw-r--r--roles/openshift_logging/tasks/generate_routes.yaml21
-rw-r--r--roles/openshift_logging/tasks/generate_secrets.yaml77
-rw-r--r--roles/openshift_logging/tasks/generate_serviceaccounts.yaml14
-rw-r--r--roles/openshift_logging/tasks/generate_services.yaml87
-rw-r--r--roles/openshift_logging/tasks/install_curator.yaml51
-rw-r--r--roles/openshift_logging/tasks/install_elasticsearch.yaml107
-rw-r--r--roles/openshift_logging/tasks/install_fluentd.yaml54
-rw-r--r--roles/openshift_logging/tasks/install_kibana.yaml58
-rw-r--r--roles/openshift_logging/tasks/install_logging.yaml56
-rw-r--r--roles/openshift_logging/tasks/install_support.yaml54
-rw-r--r--roles/openshift_logging/tasks/label_node.yaml29
-rw-r--r--roles/openshift_logging/tasks/main.yaml39
-rw-r--r--roles/openshift_logging/tasks/oc_apply.yaml29
-rw-r--r--roles/openshift_logging/tasks/procure_server_certs.yaml52
-rw-r--r--roles/openshift_logging/tasks/scale.yaml28
-rw-r--r--roles/openshift_logging/tasks/start_cluster.yaml104
-rw-r--r--roles/openshift_logging/tasks/stop_cluster.yaml97
-rw-r--r--roles/openshift_logging/tasks/upgrade_logging.yaml41
-rw-r--r--roles/openshift_logging/templates/clusterrole.j221
-rw-r--r--roles/openshift_logging/templates/clusterrolebinding.j224
-rw-r--r--roles/openshift_logging/templates/curator.j297
-rw-r--r--roles/openshift_logging/templates/elasticsearch.yml.j275
-rw-r--r--roles/openshift_logging/templates/es.j2105
-rw-r--r--roles/openshift_logging/templates/fluentd.j2149
-rw-r--r--roles/openshift_logging/templates/jks_pod.j228
-rw-r--r--roles/openshift_logging/templates/kibana.j2110
-rw-r--r--roles/openshift_logging/templates/oauth-client.j215
-rw-r--r--roles/openshift_logging/templates/pvc.j227
-rw-r--r--roles/openshift_logging/templates/rolebinding.j214
-rw-r--r--roles/openshift_logging/templates/route_reencrypt.j225
-rw-r--r--roles/openshift_logging/templates/secret.j29
-rw-r--r--roles/openshift_logging/templates/service.j228
-rw-r--r--roles/openshift_logging/templates/serviceaccount.j216
-rw-r--r--roles/openshift_logging/templates/signing.conf.j2103
-rw-r--r--roles/openshift_logging/vars/main.yaml8
-rw-r--r--roles/openshift_master/meta/main.yml32
-rw-r--r--roles/openshift_master/templates/master.yaml.v1.j22
-rw-r--r--roles/openshift_master_certificates/tasks/main.yml65
-rw-r--r--roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py4
-rw-r--r--roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py4
-rw-r--r--roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py6
-rw-r--r--roles/openshift_master_facts/vars/main.yml21
-rw-r--r--roles/openshift_metrics/README.md84
-rw-r--r--roles/openshift_metrics/defaults/main.yaml48
-rwxr-xr-xroles/openshift_metrics/files/import_jks_certs.sh116
-rw-r--r--roles/openshift_metrics/meta/main.yaml16
-rw-r--r--roles/openshift_metrics/tasks/generate_certificates.yaml26
-rw-r--r--roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml166
-rw-r--r--roles/openshift_metrics/tasks/generate_heapster_certificates.yaml41
-rw-r--r--roles/openshift_metrics/tasks/generate_rolebindings.yaml33
-rw-r--r--roles/openshift_metrics/tasks/generate_serviceaccounts.yaml27
-rw-r--r--roles/openshift_metrics/tasks/generate_services.yaml46
-rw-r--r--roles/openshift_metrics/tasks/import_jks_certs.yaml72
-rw-r--r--roles/openshift_metrics/tasks/install_cassandra.yaml54
-rw-r--r--roles/openshift_metrics/tasks/install_hawkular.yaml54
-rw-r--r--roles/openshift_metrics/tasks/install_heapster.yaml14
-rw-r--r--roles/openshift_metrics/tasks/install_metrics.yaml45
-rw-r--r--roles/openshift_metrics/tasks/install_support.yaml23
-rw-r--r--roles/openshift_metrics/tasks/main.yaml77
-rw-r--r--roles/openshift_metrics/tasks/oc_apply.yaml32
-rw-r--r--roles/openshift_metrics/tasks/scale.yaml30
-rw-r--r--roles/openshift_metrics/tasks/setup_certificate.yaml52
-rw-r--r--roles/openshift_metrics/tasks/start_metrics.yaml54
-rw-r--r--roles/openshift_metrics/tasks/stop_metrics.yaml55
-rw-r--r--roles/openshift_metrics/tasks/uninstall_metrics.yaml19
-rw-r--r--roles/openshift_metrics/templates/hawkular_cassandra_rc.j2125
-rw-r--r--roles/openshift_metrics/templates/hawkular_metrics_rc.j2119
-rw-r--r--roles/openshift_metrics/templates/heapster.j298
-rw-r--r--roles/openshift_metrics/templates/pvc.j227
-rw-r--r--roles/openshift_metrics/templates/rolebinding.j223
-rw-r--r--roles/openshift_metrics/templates/route.j235
-rw-r--r--roles/openshift_metrics/templates/secret.j212
-rw-r--r--roles/openshift_metrics/templates/service.j232
-rw-r--r--roles/openshift_metrics/templates/serviceaccount.j216
-rw-r--r--roles/openshift_metrics/vars/main.yaml29
-rw-r--r--roles/openshift_node/README.md4
-rw-r--r--roles/openshift_node/meta/main.yml33
-rw-r--r--roles/openshift_node/templates/openshift.docker.node.service2
-rw-r--r--roles/openshift_node_certificates/tasks/main.yml26
-rw-r--r--roles/openshift_preflight/README.md52
-rwxr-xr-xroles/openshift_preflight/base/library/aos_version.py100
-rwxr-xr-xroles/openshift_preflight/base/library/check_yum_update.py116
-rw-r--r--roles/openshift_preflight/common/meta/main.yml3
-rw-r--r--roles/openshift_preflight/common/tasks/main.yml21
-rw-r--r--roles/openshift_preflight/init/meta/main.yml3
-rw-r--r--roles/openshift_preflight/init/tasks/main.yml4
-rw-r--r--roles/openshift_preflight/masters/meta/main.yml3
-rw-r--r--roles/openshift_preflight/masters/tasks/main.yml31
-rw-r--r--roles/openshift_preflight/nodes/meta/main.yml3
-rw-r--r--roles/openshift_preflight/nodes/tasks/main.yml41
-rw-r--r--roles/openshift_preflight/verify_status/callback_plugins/zz_failure_summary.py96
-rw-r--r--roles/openshift_preflight/verify_status/tasks/main.yml8
-rw-r--r--roles/openshift_repos/tasks/main.yaml4
-rw-r--r--roles/openshift_repos/templates/yum_repo.j24
-rw-r--r--roles/openshift_storage_nfs_lvm/README.md9
-rw-r--r--roles/openshift_storage_nfs_lvm/defaults/main.yml7
-rw-r--r--roles/openshift_storage_nfs_lvm/meta/main.yml3
-rw-r--r--roles/openshift_storage_nfs_lvm/tasks/main.yml2
-rw-r--r--roles/openshift_storage_nfs_lvm/templates/nfs.json.j26
-rw-r--r--roles/openshift_version/tasks/set_version_containerized.yml5
-rw-r--r--roles/os_firewall/README.md7
-rw-r--r--roles/os_firewall/defaults/main.yml8
-rwxr-xr-xroles/os_firewall/library/os_firewall_manage_iptables.py16
-rw-r--r--roles/os_firewall/tasks/firewall/firewalld.yml5
-rw-r--r--roles/os_firewall/tasks/main.yml6
-rw-r--r--roles/rhel_subscribe/tasks/main.yml5
-rw-r--r--setup.cfg27
-rw-r--r--setup.py245
-rw-r--r--test-requirements.txt11
-rw-r--r--tox.ini19
-rw-r--r--utils/.coveragerc1
l---------utils/.pylintrc1
-rw-r--r--utils/Makefile35
-rw-r--r--utils/README.md41
-rw-r--r--utils/docs/man/man1/atomic-openshift-installer.18
-rw-r--r--utils/docs/man/man1/atomic-openshift-installer.1.asciidoc.in2
-rw-r--r--utils/setup.cfg8
-rw-r--r--utils/src/ooinstall/cli_installer.py16
-rw-r--r--utils/src/ooinstall/oo_config.py27
-rw-r--r--utils/src/ooinstall/openshift_ansible.py22
-rw-r--r--utils/src/ooinstall/variants.py44
-rw-r--r--utils/test-requirements.txt6
-rw-r--r--utils/test/cli_installer_tests.py21
-rw-r--r--utils/test/fixture.py10
-rw-r--r--utils/test/oo_config_tests.py7
-rw-r--r--utils/test/openshift_ansible_tests.py71
-rw-r--r--utils/test/test_utils.py11
-rw-r--r--utils/tox.ini16
462 files changed, 78434 insertions, 3135 deletions
diff --git a/.coveragerc b/.coveragerc
new file mode 100644
index 000000000..e1d918755
--- /dev/null
+++ b/.coveragerc
@@ -0,0 +1,5 @@
+[run]
+omit=
+ */lib/python*/site-packages/*
+ */lib/python*/*
+ /usr/*
diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
index 627fa13eb..2a4f80a36 100644
--- a/.github/ISSUE_TEMPLATE.md
+++ b/.github/ISSUE_TEMPLATE.md
@@ -1,21 +1,64 @@
-[provide a description of the issue]
+#### Description
+
+Provide a brief description of your issue here. For example:
+
+> On a multi master install, if the first master goes down we can no
+> longer scaleup the cluster with new nodes or masters.
+
##### Version
-[if you're operating from a git clone provide the output of `git describe`]
-[if you're running from playbooks installed via RPM or atomic-openshift-utils `rpm -q atomic-openshift-utils openshift-ansible`]
-[Your version of ansible, `ansible --version`]
+Please put the following version information in the code block
+indicated below.
+
+* Your ansible version per `ansible --version`
+
+If you're operating from a **git clone**:
+
+* The output of `git describe`
+
+If you're running from playbooks installed via RPM or
+`atomic-openshift-utils`:
+
+* The output of `rpm -q atomic-openshift-utils openshift-ansible`
+
+Place the output in the code block below:
+
+```
+VERSION INFORMATION HERE PLEASE
+```
##### Steps To Reproduce
1. [step 1]
2. [step 2]
-##### Current Result
-##### Expected Result
+##### Expected Results
+Describe what you expected to happen.
+
+```
+Example command and output or error messages
+```
+
+##### Observed Results
+Describe what is actually happening.
+
+```
+Example command and output or error messages
+```
+
+For long output or logs, consider using a [gist](https://gist.github.com/)
+
##### Additional Information
-[The exact command you ran]
-[Your operating system and version, ie: RHEL 7.2, Fedora 23]
-[Your inventory file]
-[visit https://docs.openshift.org/latest/welcome/index.html]
+
+Provide any additional information which may help us diagnose the
+issue.
+
+* Your operating system and version, e.g.: RHEL 7.2, Fedora 23 (`$ cat /etc/redhat-release`)
+* Your inventory file (especially any non-standard configuration parameters)
+* Sample code, etc.
+
+```
+EXTRA INFORMATION GOES HERE
+```
diff --git a/.gitignore b/.gitignore
index 48507c5d1..9af271235 100644
--- a/.gitignore
+++ b/.gitignore
@@ -21,3 +21,9 @@ multi_inventory.yaml
ansible.cfg
*.retry
.vscode/*
+.cache
+.tox
+.coverage
+*.egg-info
+.eggs
+cover
diff --git a/git/.pylintrc b/.pylintrc
index 9c98889b3..a32bd3d68 100644
--- a/git/.pylintrc
+++ b/.pylintrc
@@ -1,5 +1,4 @@
[MASTER]
-
# Specify a configuration file.
#rcfile=
@@ -7,12 +6,9 @@
# pygtk.require().
#init-hook=
-# Profiled execution.
-profile=no
-
# Add files or directories to the blacklist. They should be base names, not
# paths.
-ignore=CVS
+ignore=CVS,setup.py
# Pickle collected data for later comparisons.
persistent=no
@@ -21,14 +17,6 @@ persistent=no
# usually to register additional checkers.
load-plugins=
-# Deprecated. It was used to include message's id in output. Use --msg-template
-# instead.
-#include-ids=no
-
-# Deprecated. It was used to include symbolic ids of messages in output. Use
-# --msg-template instead.
-#symbols=no
-
# Use multiple processes to speed up Pylint.
jobs=1
@@ -58,7 +46,8 @@ confidence=
# Enable the message, report, category or checker with the given id(s). You can
# either give multiple identifier separated by comma (,) or put this option
-# multiple time. See also the "--disable" option for examples.
+# multiple time (only on the command line, not in the configuration file where
+# it should appear only once). See also the "--disable" option for examples.
#enable=
# Disable the message, report, category or checker with the given id(s). You
@@ -70,8 +59,7 @@ confidence=
# --enable=similarities". If you want to run only the classes checker, but have
# no Warning level messages displayed, use"--disable=all --enable=classes
# --disable=W"
-# w0511 - fixme - disabled because TODOs are acceptable
-disable=E1608,W1627,E1601,E1603,E1602,E1605,E1604,E1607,E1606,W1621,W1620,W1623,W1622,W1625,W1624,W1609,W1608,W1607,W1606,W1605,W1604,W1603,W1602,W1601,W1639,W1640,I0021,W1638,I0020,W1618,W1619,W1630,W1626,W1637,W1634,W1635,W1610,W1611,W1612,W1613,W1614,W1615,W1616,W1617,W1632,W1633,W0704,W1628,W1629,W1636,W0511,R0801,locally-disabled,file-ignored
+disable=import-star-module-level,old-octal-literal,oct-method,print-statement,unpacking-in-except,parameter-unpacking,backtick,old-raise-syntax,old-ne-operator,long-suffix,dict-view-method,dict-iter-method,metaclass-assignment,next-method-called,raising-string,indexing-exception,raw_input-builtin,long-builtin,file-builtin,execfile-builtin,coerce-builtin,cmp-builtin,buffer-builtin,basestring-builtin,apply-builtin,filter-builtin-not-iterating,using-cmp-argument,useless-suppression,range-builtin-not-iterating,suppressed-message,no-absolute-import,old-division,cmp-method,reload-builtin,zip-builtin-not-iterating,intern-builtin,unichr-builtin,reduce-builtin,standarderror-builtin,unicode-builtin,xrange-builtin,coerce-method,delslice-method,getslice-method,setslice-method,input-builtin,round-builtin,hex-method,nonzero-method,map-builtin-not-iterating
[REPORTS]
@@ -96,20 +84,24 @@ reports=no
# (RP0004).
evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
-# Add a comment according to your evaluation note. This is used by the global
-# evaluation report (RP0004).
-comment=no
-
# Template used to display messages. This is a python new-style format string
# used to format the message information. See doc for all details
#msg-template=
-[LOGGING]
+[SIMILARITIES]
-# Logging modules to check that the string format arguments are in logging
-# function parameter format
-logging-modules=logging
+# Minimum lines number of a similarity.
+min-similarity-lines=4
+
+# Ignore comments when computing similarities.
+ignore-comments=yes
+
+# Ignore docstrings when computing similarities.
+ignore-docstrings=yes
+
+# Ignore imports when computing similarities.
+ignore-imports=yes
[BASIC]
@@ -192,44 +184,23 @@ method-name-hint=[a-z_][a-z0-9_]{2,30}$
# Regular expression which should only match function or class names that do
# not require a docstring.
-no-docstring-rgx=__.*__
+no-docstring-rgx=^_
# Minimum line length for functions/classes that require docstrings, shorter
# ones are exempt.
docstring-min-length=-1
-[SIMILARITIES]
-
-# Minimum lines number of a similarity.
-min-similarity-lines=0
-
-# Ignore comments when computing similarities.
-ignore-comments=yes
-
-# Ignore docstrings when computing similarities.
-ignore-docstrings=yes
-
-# Ignore imports when computing similarities.
-ignore-imports=yes
-
-
-[VARIABLES]
+[ELIF]
-# Tells whether we should check for unused import in __init__ files.
-init-import=no
+# Maximum number of nested blocks for function / method body
+max-nested-blocks=5
-# A regular expression matching the name of dummy variables (i.e. expectedly
-# not used).
-dummy-variables-rgx=_$|dummy
-# List of additional names supposed to be defined in builtins. Remember that
-# you should avoid to define new builtins when possible.
-additional-builtins=
+[MISCELLANEOUS]
-# List of strings which can identify a callback function by name. A callback
-# name must start or end with one of those strings.
-callbacks=cb_,_cb
+# List of note tags to take in consideration, separated by a comma.
+notes=FIXME,XXX,TODO
[TYPECHECK]
@@ -240,27 +211,30 @@ ignore-mixin-members=yes
# List of module names for which member attributes should not be checked
# (useful for modules/projects where namespaces are manipulated during runtime
-# and thus existing member attributes cannot be deduced by static analysis
+# and thus existing member attributes cannot be deduced by static analysis. It
+# supports qualified module names, as well as Unix pattern matching.
ignored-modules=
# List of classes names for which member attributes should not be checked
-# (useful for classes with attributes dynamically set).
-ignored-classes=SQLObject
-
-# When zope mode is activated, add a predefined set of Zope acquired attributes
-# to generated-members.
-zope=no
+# (useful for classes with attributes dynamically set). This supports can work
+# with qualified names.
+ignored-classes=
# List of members which are set dynamically and missed by pylint inference
-# system, and so shouldn't trigger E0201 when accessed. Python regular
+# system, and so shouldn't trigger E1101 when accessed. Python regular
# expressions are accepted.
-generated-members=REQUEST,acl_users,aq_parent
+generated-members=
[SPELLING]
-# Spelling dictionary name. Available dictionaries: none. To make it working
-# install python-enchant package.
+# Spelling dictionary name. Available dictionaries: en_ZW (myspell), en_NG
+# (myspell), en_NA (myspell), en_NZ (myspell), en_PH (myspell), en_AG
+# (myspell), en_BW (myspell), en_IE (myspell), en_ZM (myspell), en_DK
+# (myspell), en_CA (myspell), en_GH (myspell), en_IN (myspell), en_BZ
+# (myspell), en_MW (myspell), en_TT (myspell), en_JM (myspell), en_GB
+# (myspell), en_ZA (myspell), en_SG (myspell), en_AU (myspell), en_US
+# (myspell), en_BS (myspell), en_HK (myspell).
spelling-dict=
# List of comma separated words that should not be checked.
@@ -274,12 +248,6 @@ spelling-private-dict-file=
spelling-store-unknown-words=no
-[MISCELLANEOUS]
-
-# List of note tags to take in consideration, separated by a comma.
-notes=FIXME,XXX,TODO
-
-
[FORMAT]
# Maximum number of characters on a single line.
@@ -292,23 +260,67 @@ ignore-long-lines=^\s*(# )?<?https?://\S+>?$
# else.
single-line-if-stmt=no
-# List of optional constructs for which whitespace checking is disabled
+# List of optional constructs for which whitespace checking is disabled. `dict-
+# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}.
+# `trailing-comma` allows a space between comma and closing bracket: (a, ).
+# `empty-line` allows space-only lines.
no-space-check=trailing-comma,dict-separator
# Maximum number of lines in a module
max-module-lines=1000
-# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
+# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
# tab).
indent-string=' '
-# Number of spaces of indent required inside a hanging or continued line.
+# Number of spaces of indent required inside a hanging or continued line.
indent-after-paren=4
# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
expected-line-ending-format=
+[VARIABLES]
+
+# Tells whether we should check for unused import in __init__ files.
+init-import=no
+
+# A regular expression matching the name of dummy variables (i.e. expectedly
+# not used).
+dummy-variables-rgx=_$|dummy
+
+# List of additional names supposed to be defined in builtins. Remember that
+# you should avoid to define new builtins when possible.
+additional-builtins=
+
+# List of strings which can identify a callback function by name. A callback
+# name must start or end with one of those strings.
+callbacks=cb_,_cb
+
+
+[LOGGING]
+
+# Logging modules to check that the string format arguments are in logging
+# function parameter format
+logging-modules=logging
+
+
+[CLASSES]
+
+# List of method names used to declare (i.e. assign) instance attributes.
+defining-attr-methods=__init__,__new__,setUp
+
+# List of valid names for the first argument in a class method.
+valid-classmethod-first-arg=cls
+
+# List of valid names for the first argument in a metaclass class method.
+valid-metaclass-classmethod-first-arg=mcs
+
+# List of member names, which should be excluded from the protected access
+# warning.
+exclude-protected=_asdict,_fields,_replace,_source,_make
+
+
[DESIGN]
# Maximum number of arguments for function / method
@@ -342,21 +354,8 @@ min-public-methods=2
# Maximum number of public methods for a class (see R0904).
max-public-methods=20
-
-[CLASSES]
-
-# List of method names used to declare (i.e. assign) instance attributes.
-defining-attr-methods=__init__,__new__,setUp
-
-# List of valid names for the first argument in a class method.
-valid-classmethod-first-arg=cls
-
-# List of valid names for the first argument in a metaclass class method.
-valid-metaclass-classmethod-first-arg=mcs
-
-# List of member names, which should be excluded from the protected access
-# warning.
-exclude-protected=_asdict,_fields,_replace,_source,_make
+# Maximum number of boolean expressions in a if statement
+max-bool-expr=5
[IMPORTS]
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index bde176e44..fd9a1844f 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.4.17-1 ./
+3.5.1-1 ./
diff --git a/.tito/releasers.conf b/.tito/releasers.conf
index daa350cf6..032212b24 100644
--- a/.tito/releasers.conf
+++ b/.tito/releasers.conf
@@ -27,6 +27,12 @@ releaser = tito.release.DistGitReleaser
branches = rhaos-3.4-rhel-7
srpm_disttag = .el7aos
+[aos-3.5]
+releaser = tito.release.DistGitReleaser
+branches = rhaos-3.5-rhel-7
+srpm_disttag = .el7aos
+
+
[copr-openshift-ansible]
releaser = tito.release.CoprReleaser
project_name = @OpenShiftOnlineOps/openshift-ansible
diff --git a/.travis.yml b/.travis.yml
index b5b7a2a59..f0a228c23 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,14 +1,20 @@
---
sudo: false
+cache:
+ - pip
+
language: python
python:
- "2.7"
+ - "3.5"
install:
- pip install -r requirements.txt
+ - pip install tox-travis
script:
# TODO(rhcarvalho): check syntax of other important entrypoint playbooks
- ansible-playbook --syntax-check playbooks/byo/config.yml
- - cd utils && make ci
+ - tox
+ - cd utils && tox
diff --git a/git/.yamllint b/.yamllint
index 573321a94..573321a94 100644
--- a/git/.yamllint
+++ b/.yamllint
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 1145da495..83c844e28 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -66,30 +66,55 @@ These are plugins used in playbooks and roles:
└── test Contains tests.
```
-### Others
-
-```
-.
-└── git Contains some helper scripts for repository maintenance.
-```
-
## Building RPMs
See the [RPM build instructions](BUILD.md).
## Running tests
-We use [Nose](http://readthedocs.org/docs/nose/) as a test runner. Make sure it
-is installed along with other test dependencies:
+This section covers how to run tests for the root of this repo; running tests
+for the oo-install wrapper is described in [utils/README.md](utils/README.md).
+
+We use [tox](http://readthedocs.org/docs/tox/) to manage virtualenvs and run
+tests. Alternatively, tests can be run using
+[detox](https://pypi.python.org/pypi/detox/), which allows for running tests in
+parallel:
+
```
-pip install -r utils/test-requirements.txt
+pip install tox detox
```
-Run the tests with:
+List the test environments available:
+```
+tox -l
+```
+
+Run all of the tests with:
+```
+tox
+```
+
+Run all of the tests in parallel with detox:
+```
+detox
+```
+
+Running a particular test environment (python 2.7 flake8 tests in this case):
+```
+tox -e py27-ansible22-flake8
+```
+
+Running a particular test environment in a clean virtualenv (python 3.5 pylint
+tests in this case):
+```
+tox -r -e py35-ansible22-pylint
+```
+If you want to enter the virtualenv created by tox to do additional
+testing/debugging (py27-ansible22-flake8 env in this case):
```
-nosetests
+source .tox/py27-ansible22-flake8/bin/activate
```
## Submitting contributions
diff --git a/HOOKS.md b/HOOKS.md
new file mode 100644
index 000000000..9c5f80054
--- /dev/null
+++ b/HOOKS.md
@@ -0,0 +1,70 @@
+# Hooks
+
+The ansible installer allows operators to execute custom tasks during
+specific operations through a system called hooks. Hooks allow operators to
+provide files defining tasks to execute before and/or after specific points
+during installations and upgrades. This can be very helpful to validate
+or modify custom infrastructure when installing/upgrading OpenShift.
+
+It is important to remember that when a hook fails, the operation fails and
+may need to be re-run. This means a good hook can run multiple times and
+provide the same results. A great hook is idempotent.
+
+**Note**: There is currently no standard interface for hooks. In the future
+a standard interface will be defined, and any hooks that existed previously
+will need to be updated to meet the new standard.
+
+## Using Hooks
+
+Hooks are defined in the ``hosts`` inventory file under the ``OSEv3:vars``
+section.
+
+Each hook should point to a YAML file which defines Ansible tasks. This file
+will be used as an include, meaning that the file cannot be a playbook but
+must be a set of tasks. Best practice suggests using absolute paths to the hook file to avoid any ambiguity.
+
+### Example
+```ini
+[OSEv3:vars]
+# <snip>
+openshift_master_upgrade_pre_hook=/usr/share/custom/pre_master.yml
+openshift_master_upgrade_hook=/usr/share/custom/master.yml
+openshift_master_upgrade_post_hook=/usr/share/custom/post_master.yml
+# <snip>
+```
+
+Each hook file must be a YAML-formatted file that defines a set of Ansible
+tasks. The file may **not** be a playbook.
+
+### Example
+```yaml
+---
+# Trivial example forcing an operator to ack the start of an upgrade
+# file=/usr/share/custom/pre_master.yml
+
+- name: note the start of a master upgrade
+ debug:
+ msg: "Master upgrade of {{ inventory_hostname }} is about to start"
+
+- name: require an operator agree to start an upgrade
+ pause:
+ prompt: "Hit enter to start the master upgrade"
+```
+
+## Upgrade Hooks
+
+### openshift_master_upgrade_pre_hook
+- Runs **before** each master is upgraded.
+- This hook runs against **each master** in serial.
+- If a task needs to run against a different host, said task will need to use [``delegate_to`` or ``local_action``](http://docs.ansible.com/ansible/playbooks_delegation.html#delegation).
+
+### openshift_master_upgrade_hook
+- Runs **after** each master is upgraded but **before** its service/system restart.
+- This hook runs against **each master** in serial.
+- If a task needs to run against a different host, said task will need to use [``delegate_to`` or ``local_action``](http://docs.ansible.com/ansible/playbooks_delegation.html#delegation).
+
+
+### openshift_master_upgrade_post_hook
+- Runs **after** each master is upgraded and has had its service/system restarted.
+- This hook runs against **each master** in serial.
+- If a task needs to run against a different host, said task will need to use [``delegate_to`` or ``local_action``](http://docs.ansible.com/ansible/playbooks_delegation.html#delegation).
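As a concrete illustration of the upgrade hooks described above, the following sketch shows what a post-upgrade hook file might look like when one task has to run against a different host. It is illustrative only: the file path matches the inventory example above, but the `admin.example.com` hostname and the marker-file location are hypothetical and are not part of this patch.

```yaml
---
# Hypothetical post-upgrade hook, e.g. /usr/share/custom/post_master.yml.
# Tasks only (not a playbook); runs against each master in serial.

- name: note that the master upgrade finished
  debug:
    msg: "Master upgrade of {{ inventory_hostname }} is complete"

# A task that must run on a different host uses delegate_to, as noted above.
- name: record the upgraded master on a separate admin host (hypothetical)
  file:
    path: "/tmp/upgraded-{{ inventory_hostname }}"
    state: touch
  delegate_to: admin.example.com
```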
diff --git a/README.md b/README.md
index 635981b45..c3c022e59 100644
--- a/README.md
+++ b/README.md
@@ -3,30 +3,48 @@
# OpenShift Ansible
-This repository contains [Ansible](https://www.ansible.com/) code to install,
-upgrade and manage [OpenShift](https://www.openshift.com/) clusters.
-
-**Note**: the Ansible playbooks in this repository require an RPM package that
-provides `docker`. Currently, the RPMs from
-[dockerproject.org](https://dockerproject.org/) do not provide this requirement,
-though they may in the future. This limitation is being tracked by
+This repository contains [Ansible](https://www.ansible.com/) roles and
+playbooks to install, upgrade, and manage
+[OpenShift](https://www.openshift.com/) clusters.
+
+**Note**: the Ansible playbooks in this repository require an RPM
+package that provides `docker`. Currently, the RPMs from
+[dockerproject.org](https://dockerproject.org/) do not provide this
+requirement, though they may in the future. This limitation is being
+tracked by
[#2720](https://github.com/openshift/openshift-ansible/issues/2720).
-## Branches and tags
+## Getting the correct version
+
+The
+[master branch](https://github.com/openshift/openshift-ansible/tree/master)
+tracks our current work **in development** and should be compatible
+with the
+[Origin master branch](https://github.com/openshift/origin/tree/master)
+(code in development).
+
+In addition to the master branch, we maintain stable branches
+corresponding to upstream Origin releases, e.g.: we guarantee an
+openshift-ansible 3.2 release will fully support an origin
+[1.2 release](https://github.com/openshift/openshift-ansible/tree/release-1.2).
+The most recent branch will often receive minor feature backports and
+fixes. Older branches will receive only critical fixes.
+
+**Getting the right openshift-ansible release**
+
+Follow this release pattern and you can't go wrong:
-The [master branch](https://github.com/openshift/openshift-ansible/tree/master)
-tracks our current work and should be compatible with both [Origin master
-branch](https://github.com/openshift/origin/tree/master) and the [most recent
-Origin stable release](https://github.com/openshift/origin/releases). Currently
-that's v1.4 and v1.3.x. In addition to the master branch, we maintain stable
-branches corresponding to upstream Origin releases, e.g.:
-[release-1.2](https://github.com/openshift/openshift-ansible/tree/release-1.2).
-The most recent branch will often receive minor feature backports and fixes.
-Older branches will receive only critical fixes.
+| Origin | OpenShift-Ansible |
+| ------------- | ----------------- |
+| 1.3 | 3.3 |
+| 1.4 | 3.4 |
+| 1.*X* | 3.*X* |
+
+If you're running from the openshift-ansible **master branch** we can
+only guarantee compatibility with the newest Origin releases **in
+development**. If you are running a stable Origin release, use the
+openshift-ansible branch corresponding to your Origin version.
-Releases are tagged periodically from active branches and are versioned 3.x
-corresponding to Origin releases 1.x. We unfortunately started with 3.0 and it's
-not practical to start over at 1.0.
## Setup
@@ -56,6 +74,12 @@ not practical to start over at 1.0.
- [OpenShift Enterprise](https://docs.openshift.com/enterprise/latest/install_config/install/advanced_install.html)
- [OpenShift Origin](https://docs.openshift.org/latest/install_config/install/advanced_install.html)
+
+## Installer Hooks
+
+See the [hooks documentation](HOOKS.md).
+
+
## Contributing
See the [contribution guide](CONTRIBUTING.md).
diff --git a/README_GCE.md b/README_GCE.md
index f909630aa..99c8715de 100644
--- a/README_GCE.md
+++ b/README_GCE.md
@@ -87,7 +87,8 @@ Install Dependencies
```
> Installation using Mac OSX requires pycrypto library
-> $ pip install pycrypto
+>
+> <kbd>$ pip install pycrypto</kbd>
Test The Setup
--------------
diff --git a/bin/README.md b/bin/README.md
new file mode 100644
index 000000000..fec17cb9b
--- /dev/null
+++ b/bin/README.md
@@ -0,0 +1,6 @@
+# The `bin/cluster` tool
+
+This tool was meant to be the entry point for managing OpenShift clusters,
+running against different "providers" (`aws`, `gce`, `libvirt`, `openstack`),
+though its use is now deprecated in favor of the [`byo`](../playbooks/byo)
+playbooks.
diff --git a/callback_plugins/openshift_quick_installer.py b/callback_plugins/openshift_quick_installer.py
index fc9bfb899..b4c7edd38 100644
--- a/callback_plugins/openshift_quick_installer.py
+++ b/callback_plugins/openshift_quick_installer.py
@@ -36,30 +36,13 @@ What's different:
"""
from __future__ import (absolute_import, print_function)
-import imp
-import os
import sys
from ansible import constants as C
+from ansible.plugins.callback import CallbackBase
from ansible.utils.color import colorize, hostcolor
-ANSIBLE_PATH = imp.find_module('ansible')[1]
-DEFAULT_PATH = os.path.join(ANSIBLE_PATH, 'plugins/callback/default.py')
-DEFAULT_MODULE = imp.load_source(
- 'ansible.plugins.callback.default',
- DEFAULT_PATH
-)
-try:
- from ansible.plugins.callback import CallbackBase
- BASECLASS = CallbackBase
-except ImportError: # < ansible 2.1
- BASECLASS = DEFAULT_MODULE.CallbackModule
-
-reload(sys)
-sys.setdefaultencoding('utf-8')
-
-
-class CallbackModule(DEFAULT_MODULE.CallbackModule):
+class CallbackModule(CallbackBase):
"""
Ansible callback plugin
@@ -286,8 +269,9 @@ The only thing we change here is adding `log_only=True` to the
self._display.display("", screen_only=True)
# Some plays are conditional and won't run (such as load
- # balancers) if they aren't required. Let the user know about
- # this to avoid potential confusion.
+ # balancers) if they aren't required. Sometimes plays are
+ # conditionally included later in the run. Let the user know
+ # about this to avoid potential confusion.
if self.plays_total_ran != self.plays_count:
- print("Installation Complete: Note: Play count is an estimate and some were skipped because your install does not require them")
+ print("Installation Complete: Note: Play count is only an estimate, some plays may have been skipped or dynamically added")
self._display.display("", screen_only=True)
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index 8fe85d8e2..c9390efe6 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -11,6 +11,7 @@ import pkg_resources
import re
import json
import yaml
+import random
from ansible import errors
from collections import Mapping
@@ -19,6 +20,7 @@ from distutils.version import LooseVersion
from operator import itemgetter
from ansible.parsing.yaml.dumper import AnsibleDumper
from urlparse import urlparse
+from six import string_types
HAS_OPENSSL = False
try:
@@ -37,925 +39,938 @@ except ImportError:
from ansible.utils.unicode import to_unicode as to_text
-# Disabling too-many-public-methods, since filter methods are necessarily
-# public
-# pylint: disable=too-many-public-methods
-class FilterModule(object):
- """ Custom ansible filters """
-
- @staticmethod
- def oo_pdb(arg):
- """ This pops you into a pdb instance where arg is the data passed in
- from the filter.
- Ex: "{{ hostvars | oo_pdb }}"
- """
- pdb.set_trace()
- return arg
-
- @staticmethod
- def get_attr(data, attribute=None):
- """ This looks up dictionary attributes of the form a.b.c and returns
- the value.
-
- If the key isn't present, None is returned.
- Ex: data = {'a': {'b': {'c': 5}}}
- attribute = "a.b.c"
- returns 5
- """
- if not attribute:
- raise errors.AnsibleFilterError("|failed expects attribute to be set")
-
- ptr = data
- for attr in attribute.split('.'):
- if attr in ptr:
- ptr = ptr[attr]
- else:
- ptr = None
- break
-
- return ptr
-
- @staticmethod
- def oo_flatten(data):
- """ This filter plugin will flatten a list of lists
- """
- if not isinstance(data, list):
- raise errors.AnsibleFilterError("|failed expects to flatten a List")
-
- return [item for sublist in data for item in sublist]
-
- @staticmethod
- def oo_merge_dicts(first_dict, second_dict):
- """ Merge two dictionaries where second_dict values take precedence.
- Ex: first_dict={'a': 1, 'b': 2}
- second_dict={'b': 3, 'c': 4}
- returns {'a': 1, 'b': 3, 'c': 4}
- """
- if not isinstance(first_dict, dict) or not isinstance(second_dict, dict):
- raise errors.AnsibleFilterError("|failed expects to merge two dicts")
- merged = first_dict.copy()
- merged.update(second_dict)
- return merged
-
- @staticmethod
- def oo_merge_hostvars(hostvars, variables, inventory_hostname):
- """ Merge host and play variables.
-
- When ansible version is greater than or equal to 2.0.0,
- merge hostvars[inventory_hostname] with variables (ansible vars)
- otherwise merge hostvars with hostvars['inventory_hostname'].
-
- Ex: hostvars={'master1.example.com': {'openshift_variable': '3'},
- 'openshift_other_variable': '7'}
- variables={'openshift_other_variable': '6'}
- inventory_hostname='master1.example.com'
- returns {'openshift_variable': '3', 'openshift_other_variable': '7'}
-
- hostvars=<ansible.vars.hostvars.HostVars object> (Mapping)
- variables={'openshift_other_variable': '6'}
- inventory_hostname='master1.example.com'
- returns {'openshift_variable': '3', 'openshift_other_variable': '6'}
- """
- if not isinstance(hostvars, Mapping):
- raise errors.AnsibleFilterError("|failed expects hostvars is dictionary or object")
- if not isinstance(variables, dict):
- raise errors.AnsibleFilterError("|failed expects variables is a dictionary")
- if not isinstance(inventory_hostname, basestring):
- raise errors.AnsibleFilterError("|failed expects inventory_hostname is a string")
- # pylint: disable=no-member
- ansible_version = pkg_resources.get_distribution("ansible").version
- merged_hostvars = {}
- if LooseVersion(ansible_version) >= LooseVersion('2.0.0'):
- merged_hostvars = FilterModule.oo_merge_dicts(hostvars[inventory_hostname],
- variables)
- else:
- merged_hostvars = FilterModule.oo_merge_dicts(hostvars[inventory_hostname],
- hostvars)
- return merged_hostvars
-
- @staticmethod
- def oo_collect(data, attribute=None, filters=None):
- """ This takes a list of dict and collects all attributes specified into a
- list. If filter is specified then we will include all items that
- match _ALL_ of filters. If a dict entry is missing the key in a
- filter it will be excluded from the match.
- Ex: data = [ {'a':1, 'b':5, 'z': 'z'}, # True, return
- {'a':2, 'z': 'z'}, # True, return
- {'a':3, 'z': 'z'}, # True, return
- {'a':4, 'z': 'b'}, # FAILED, obj['z'] != obj['z']
- ]
- attribute = 'a'
- filters = {'z': 'z'}
- returns [1, 2, 3]
- """
- if not isinstance(data, list):
- raise errors.AnsibleFilterError("|failed expects to filter on a List")
-
- if not attribute:
- raise errors.AnsibleFilterError("|failed expects attribute to be set")
-
- if filters is not None:
- if not isinstance(filters, dict):
- raise errors.AnsibleFilterError("|failed expects filter to be a"
- " dict")
- retval = [FilterModule.get_attr(d, attribute) for d in data if (
- all([d.get(key, None) == filters[key] for key in filters]))]
- else:
- retval = [FilterModule.get_attr(d, attribute) for d in data]
-
- retval = [val for val in retval if val is not None]
-
- return retval
-
- @staticmethod
- def oo_select_keys_from_list(data, keys):
- """ This returns a list, which contains the value portions for the keys
- Ex: data = { 'a':1, 'b':2, 'c':3 }
- keys = ['a', 'c']
- returns [1, 3]
- """
+def oo_pdb(arg):
+ """ This pops you into a pdb instance where arg is the data passed in
+ from the filter.
+ Ex: "{{ hostvars | oo_pdb }}"
+ """
+ pdb.set_trace()
+ return arg
- if not isinstance(data, list):
- raise errors.AnsibleFilterError("|failed expects to filter on a list")
- if not isinstance(keys, list):
- raise errors.AnsibleFilterError("|failed expects first param is a list")
+def get_attr(data, attribute=None):
+ """ This looks up dictionary attributes of the form a.b.c and returns
+ the value.
- # Gather up the values for the list of keys passed in
- retval = [FilterModule.oo_select_keys(item, keys) for item in data]
+ If the key isn't present, None is returned.
+ Ex: data = {'a': {'b': {'c': 5}}}
+ attribute = "a.b.c"
+ returns 5
+ """
+ if not attribute:
+ raise errors.AnsibleFilterError("|failed expects attribute to be set")
- return FilterModule.oo_flatten(retval)
-
- @staticmethod
- def oo_select_keys(data, keys):
- """ This returns a list, which contains the value portions for the keys
- Ex: data = { 'a':1, 'b':2, 'c':3 }
- keys = ['a', 'c']
- returns [1, 3]
- """
-
- if not isinstance(data, Mapping):
- raise errors.AnsibleFilterError("|failed expects to filter on a dict or object")
-
- if not isinstance(keys, list):
- raise errors.AnsibleFilterError("|failed expects first param is a list")
-
- # Gather up the values for the list of keys passed in
- retval = [data[key] for key in keys if key in data]
-
- return retval
-
- @staticmethod
- def oo_prepend_strings_in_list(data, prepend):
- """ This takes a list of strings and prepends a string to each item in the
- list
- Ex: data = ['cart', 'tree']
- prepend = 'apple-'
- returns ['apple-cart', 'apple-tree']
- """
- if not isinstance(data, list):
- raise errors.AnsibleFilterError("|failed expects first param is a list")
- if not all(isinstance(x, basestring) for x in data):
- raise errors.AnsibleFilterError("|failed expects first param is a list"
- " of strings")
- retval = [prepend + s for s in data]
- return retval
-
- @staticmethod
- def oo_combine_key_value(data, joiner='='):
- """Take a list of dict in the form of { 'key': 'value'} and
- arrange them as a list of strings ['key=value']
- """
- if not isinstance(data, list):
- raise errors.AnsibleFilterError("|failed expects first param is a list")
-
- rval = []
- for item in data:
- rval.append("%s%s%s" % (item['key'], joiner, item['value']))
-
- return rval
-
- @staticmethod
- def oo_combine_dict(data, in_joiner='=', out_joiner=' '):
- """Take a dict in the form of { 'key': 'value', 'key': 'value' } and
- arrange them as a string 'key=value key=value'
- """
- if not isinstance(data, dict):
- # pylint: disable=line-too-long
- raise errors.AnsibleFilterError("|failed expects first param is a dict [oo_combine_dict]. Got %s. Type: %s" % (str(data), str(type(data))))
+ ptr = data
+ for attr in attribute.split('.'):
+ if attr in ptr:
+ ptr = ptr[attr]
+ else:
+ ptr = None
+ break
+
+ return ptr
+
+
+def oo_flatten(data):
+ """ This filter plugin will flatten a list of lists
+ """
+ if not isinstance(data, list):
+ raise errors.AnsibleFilterError("|failed expects to flatten a List")
+
+ return [item for sublist in data for item in sublist]
+
+
+def oo_merge_dicts(first_dict, second_dict):
+ """ Merge two dictionaries where second_dict values take precedence.
+ Ex: first_dict={'a': 1, 'b': 2}
+ second_dict={'b': 3, 'c': 4}
+ returns {'a': 1, 'b': 3, 'c': 4}
+ """
+ if not isinstance(first_dict, dict) or not isinstance(second_dict, dict):
+ raise errors.AnsibleFilterError("|failed expects to merge two dicts")
+ merged = first_dict.copy()
+ merged.update(second_dict)
+ return merged
+
+
+def oo_merge_hostvars(hostvars, variables, inventory_hostname):
+ """ Merge host and play variables.
+
+ When ansible version is greater than or equal to 2.0.0,
+ merge hostvars[inventory_hostname] with variables (ansible vars)
+ otherwise merge hostvars with hostvars['inventory_hostname'].
+
+ Ex: hostvars={'master1.example.com': {'openshift_variable': '3'},
+ 'openshift_other_variable': '7'}
+ variables={'openshift_other_variable': '6'}
+ inventory_hostname='master1.example.com'
+ returns {'openshift_variable': '3', 'openshift_other_variable': '7'}
+
+ hostvars=<ansible.vars.hostvars.HostVars object> (Mapping)
+ variables={'openshift_other_variable': '6'}
+ inventory_hostname='master1.example.com'
+ returns {'openshift_variable': '3', 'openshift_other_variable': '6'}
+ """
+ if not isinstance(hostvars, Mapping):
+ raise errors.AnsibleFilterError("|failed expects hostvars is dictionary or object")
+ if not isinstance(variables, dict):
+ raise errors.AnsibleFilterError("|failed expects variables is a dictionary")
+ if not isinstance(inventory_hostname, string_types):
+ raise errors.AnsibleFilterError("|failed expects inventory_hostname is a string")
+ # pylint: disable=no-member
+ ansible_version = pkg_resources.get_distribution("ansible").version
+ merged_hostvars = {}
+ if LooseVersion(ansible_version) >= LooseVersion('2.0.0'):
+ merged_hostvars = oo_merge_dicts(
+ hostvars[inventory_hostname], variables)
+ else:
+ merged_hostvars = oo_merge_dicts(
+ hostvars[inventory_hostname], hostvars)
+ return merged_hostvars
+
+
+def oo_collect(data, attribute=None, filters=None):
+ """ This takes a list of dict and collects all attributes specified into a
+ list. If filter is specified then we will include all items that
+ match _ALL_ of filters. If a dict entry is missing the key in a
+ filter it will be excluded from the match.
+ Ex: data = [ {'a':1, 'b':5, 'z': 'z'}, # True, return
+ {'a':2, 'z': 'z'}, # True, return
+ {'a':3, 'z': 'z'}, # True, return
+ {'a':4, 'z': 'b'}, # FAILED, obj['z'] != obj['z']
+ ]
+ attribute = 'a'
+ filters = {'z': 'z'}
+ returns [1, 2, 3]
+ """
+ if not isinstance(data, list):
+ raise errors.AnsibleFilterError("|failed expects to filter on a List")
+
+ if not attribute:
+ raise errors.AnsibleFilterError("|failed expects attribute to be set")
+
+ if filters is not None:
+ if not isinstance(filters, dict):
+ raise errors.AnsibleFilterError("|failed expects filter to be a"
+ " dict")
+ retval = [get_attr(d, attribute) for d in data if (
+ all([d.get(key, None) == filters[key] for key in filters]))]
+ else:
+ retval = [get_attr(d, attribute) for d in data]
+
+ retval = [val for val in retval if val is not None]
+
+ return retval
+
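For illustration, a minimal sketch of how the refactored `oo_collect` behaves when called directly; the sample data below is invented, and in playbooks the function is used as a Jinja2 filter rather than called like this.

```python
# Invented sample data; assumes oo_collect from filter_plugins/oo_filters.py is in scope.
data = [{'a': 1, 'z': 'z'}, {'a': 2, 'z': 'z'}, {'a': 4, 'z': 'b'}]

print(oo_collect(data, attribute='a', filters={'z': 'z'}))  # [1, 2]
print(oo_collect(data, attribute='a'))                      # [1, 2, 4]
```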
+
+def oo_select_keys_from_list(data, keys):
+ """ This returns a list, which contains the value portions for the keys
+ Ex: data = { 'a':1, 'b':2, 'c':3 }
+ keys = ['a', 'c']
+ returns [1, 3]
+ """
+
+ if not isinstance(data, list):
+ raise errors.AnsibleFilterError("|failed expects to filter on a list")
+
+ if not isinstance(keys, list):
+ raise errors.AnsibleFilterError("|failed expects first param is a list")
+
+ # Gather up the values for the list of keys passed in
+ retval = [oo_select_keys(item, keys) for item in data]
+
+ return oo_flatten(retval)
+
+
+def oo_select_keys(data, keys):
+ """ This returns a list, which contains the value portions for the keys
+ Ex: data = { 'a':1, 'b':2, 'c':3 }
+ keys = ['a', 'c']
+ returns [1, 3]
+ """
+
+ if not isinstance(data, Mapping):
+ raise errors.AnsibleFilterError("|failed expects to filter on a dict or object")
+
+ if not isinstance(keys, list):
+ raise errors.AnsibleFilterError("|failed expects first param is a list")
+
+ # Gather up the values for the list of keys passed in
+ retval = [data[key] for key in keys if key in data]
+
+ return retval
+
+
+def oo_prepend_strings_in_list(data, prepend):
+ """ This takes a list of strings and prepends a string to each item in the
+ list
+ Ex: data = ['cart', 'tree']
+ prepend = 'apple-'
+ returns ['apple-cart', 'apple-tree']
+ """
+ if not isinstance(data, list):
+ raise errors.AnsibleFilterError("|failed expects first param is a list")
+ if not all(isinstance(x, string_types) for x in data):
+ raise errors.AnsibleFilterError("|failed expects first param is a list"
+ " of strings")
+ retval = [prepend + s for s in data]
+ return retval
+
+
+def oo_combine_key_value(data, joiner='='):
+ """Take a list of dict in the form of { 'key': 'value'} and
+ arrange them as a list of strings ['key=value']
+ """
+ if not isinstance(data, list):
+ raise errors.AnsibleFilterError("|failed expects first param is a list")
+
+ rval = []
+ for item in data:
+ rval.append("%s%s%s" % (item['key'], joiner, item['value']))
+
+ return rval
+
+
+def oo_combine_dict(data, in_joiner='=', out_joiner=' '):
+ """Take a dict in the form of { 'key': 'value', 'key': 'value' } and
+ arrange them as a string 'key=value key=value'
+ """
+ if not isinstance(data, dict):
+ # pylint: disable=line-too-long
+ raise errors.AnsibleFilterError("|failed expects first param is a dict [oo_combine_dict]. Got %s. Type: %s" % (str(data), str(type(data))))
+
+ return out_joiner.join([in_joiner.join([k, str(v)]) for k, v in data.items()])
- return out_joiner.join([in_joiner.join([k, str(v)]) for k, v in data.items()])
- @staticmethod
- def oo_ami_selector(data, image_name):
- """ This takes a list of amis and an image name and attempts to return
- the latest ami.
- """
- if not isinstance(data, list):
- raise errors.AnsibleFilterError("|failed expects first param is a list")
+def oo_ami_selector(data, image_name):
+ """ This takes a list of amis and an image name and attempts to return
+ the latest ami.
+ """
+ if not isinstance(data, list):
+ raise errors.AnsibleFilterError("|failed expects first param is a list")
- if not data:
- return None
+ if not data:
+ return None
+ else:
+ if image_name is None or not image_name.endswith('_*'):
+ ami = sorted(data, key=itemgetter('name'), reverse=True)[0]
+ return ami['ami_id']
else:
- if image_name is None or not image_name.endswith('_*'):
- ami = sorted(data, key=itemgetter('name'), reverse=True)[0]
- return ami['ami_id']
- else:
- ami_info = [(ami, ami['name'].split('_')[-1]) for ami in data]
- ami = sorted(ami_info, key=itemgetter(1), reverse=True)[0][0]
- return ami['ami_id']
-
- @staticmethod
- def oo_ec2_volume_definition(data, host_type, docker_ephemeral=False):
- """ This takes a dictionary of volume definitions and returns a valid ec2
- volume definition based on the host_type and the values in the
- dictionary.
- The dictionary should look similar to this:
- { 'master':
- { 'root':
- { 'volume_size': 10, 'device_type': 'gp2',
- 'iops': 500
- },
- 'docker':
- { 'volume_size': 40, 'device_type': 'gp2',
- 'iops': 500, 'ephemeral': 'true'
- }
+ ami_info = [(ami, ami['name'].split('_')[-1]) for ami in data]
+ ami = sorted(ami_info, key=itemgetter(1), reverse=True)[0][0]
+ return ami['ami_id']
+
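A quick sketch of the two selection paths in `oo_ami_selector`, using invented AMI records:

```python
# Invented AMI records; assumes oo_ami_selector is in scope.
amis = [{'name': 'ami_20160915', 'ami_id': 'ami-aaa'},
        {'name': 'ami_20161101', 'ami_id': 'ami-bbb'}]

oo_ami_selector(amis, 'ami_*')  # 'ami-bbb' -- newest by the suffix after the last '_'
oo_ami_selector(amis, None)     # 'ami-bbb' -- newest by the full 'name' field
```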
+
+def oo_ec2_volume_definition(data, host_type, docker_ephemeral=False):
+ """ This takes a dictionary of volume definitions and returns a valid ec2
+ volume definition based on the host_type and the values in the
+ dictionary.
+ The dictionary should look similar to this:
+ { 'master':
+ { 'root':
+ { 'volume_size': 10, 'device_type': 'gp2',
+ 'iops': 500
+ },
+ 'docker':
+ { 'volume_size': 40, 'device_type': 'gp2',
+ 'iops': 500, 'ephemeral': 'true'
+ }
+ },
+ 'node':
+ { 'root':
+ { 'volume_size': 10, 'device_type': 'io1',
+ 'iops': 1000
},
- 'node':
- { 'root':
- { 'volume_size': 10, 'device_type': 'io1',
- 'iops': 1000
- },
- 'docker':
- { 'volume_size': 40, 'device_type': 'gp2',
- 'iops': 500, 'ephemeral': 'true'
- }
+ 'docker':
+ { 'volume_size': 40, 'device_type': 'gp2',
+ 'iops': 500, 'ephemeral': 'true'
}
}
- """
- if not isinstance(data, dict):
- # pylint: disable=line-too-long
- raise errors.AnsibleFilterError("|failed expects first param is a dict [oo_ec2_volume_def]. Got %s. Type: %s" % (str(data), str(type(data))))
- if host_type not in ['master', 'node', 'etcd']:
- raise errors.AnsibleFilterError("|failed expects etcd, master or node"
- " as the host type")
-
- root_vol = data[host_type]['root']
- root_vol['device_name'] = '/dev/sda1'
- root_vol['delete_on_termination'] = True
- if root_vol['device_type'] != 'io1':
- root_vol.pop('iops', None)
- if host_type in ['master', 'node'] and 'docker' in data[host_type]:
- docker_vol = data[host_type]['docker']
- docker_vol['device_name'] = '/dev/xvdb'
- docker_vol['delete_on_termination'] = True
- if docker_vol['device_type'] != 'io1':
- docker_vol.pop('iops', None)
- if docker_ephemeral:
- docker_vol.pop('device_type', None)
- docker_vol.pop('delete_on_termination', None)
- docker_vol['ephemeral'] = 'ephemeral0'
- return [root_vol, docker_vol]
- elif host_type == 'etcd' and 'etcd' in data[host_type]:
- etcd_vol = data[host_type]['etcd']
- etcd_vol['device_name'] = '/dev/xvdb'
- etcd_vol['delete_on_termination'] = True
- if etcd_vol['device_type'] != 'io1':
- etcd_vol.pop('iops', None)
- return [root_vol, etcd_vol]
- return [root_vol]
-
- @staticmethod
- def oo_split(string, separator=','):
- """ This splits the input string into a list. If the input string is
- already a list we will return it as is.
- """
- if isinstance(string, list):
- return string
- return string.split(separator)
-
- @staticmethod
- def oo_haproxy_backend_masters(hosts, port):
- """ This takes an array of dicts and returns an array of dicts
- to be used as a backend for the haproxy role
- """
- servers = []
- for idx, host_info in enumerate(hosts):
- server = dict(name="master%s" % idx)
- server_ip = host_info['openshift']['common']['ip']
- server['address'] = "%s:%s" % (server_ip, port)
- server['opts'] = 'check'
- servers.append(server)
- return servers
-
- @staticmethod
- def oo_filter_list(data, filter_attr=None):
- """ This returns a list, which contains all items where filter_attr
- evaluates to true
- Ex: data = [ { a: 1, b: True },
- { a: 3, b: False },
- { a: 5, b: True } ]
- filter_attr = 'b'
- returns [ { a: 1, b: True },
- { a: 5, b: True } ]
- """
- if not isinstance(data, list):
- raise errors.AnsibleFilterError("|failed expects to filter on a list")
-
- if not isinstance(filter_attr, basestring):
- raise errors.AnsibleFilterError("|failed expects filter_attr is a str or unicode")
-
- # Gather up the values for the list of keys passed in
- return [x for x in data if filter_attr in x and x[filter_attr]]
-
- @staticmethod
- def oo_nodes_with_label(nodes, label, value=None):
- """ Filters a list of nodes by label and value (if provided)
-
- It handles labels that are in the following variables by priority:
- openshift_node_labels, cli_openshift_node_labels, openshift['node']['labels']
-
- Examples:
- data = ['a': {'openshift_node_labels': {'color': 'blue', 'size': 'M'}},
- 'b': {'openshift_node_labels': {'color': 'green', 'size': 'L'}},
- 'c': {'openshift_node_labels': {'size': 'S'}}]
- label = 'color'
- returns = ['a': {'openshift_node_labels': {'color': 'blue', 'size': 'M'}},
- 'b': {'openshift_node_labels': {'color': 'green', 'size': 'L'}}]
-
- data = ['a': {'openshift_node_labels': {'color': 'blue', 'size': 'M'}},
- 'b': {'openshift_node_labels': {'color': 'green', 'size': 'L'}},
- 'c': {'openshift_node_labels': {'size': 'S'}}]
- label = 'color'
- value = 'green'
- returns = ['b': {'labels': {'color': 'green', 'size': 'L'}}]
-
- Args:
- nodes (list[dict]): list of node to node variables
- label (str): label to filter `nodes` by
- value (Optional[str]): value of `label` to filter by Defaults
- to None.
-
- Returns:
- list[dict]: nodes filtered by label and value (if provided)
- """
- if not isinstance(nodes, list):
- raise errors.AnsibleFilterError("failed expects to filter on a list")
- if not isinstance(label, basestring):
- raise errors.AnsibleFilterError("failed expects label to be a string")
- if value is not None and not isinstance(value, basestring):
- raise errors.AnsibleFilterError("failed expects value to be a string")
-
- def label_filter(node):
- """ filter function for testing if node should be returned """
- if not isinstance(node, dict):
- raise errors.AnsibleFilterError("failed expects to filter on a list of dicts")
- if 'openshift_node_labels' in node:
- labels = node['openshift_node_labels']
- elif 'cli_openshift_node_labels' in node:
- labels = node['cli_openshift_node_labels']
- elif 'openshift' in node and 'node' in node['openshift'] and 'labels' in node['openshift']['node']:
- labels = node['openshift']['node']['labels']
- else:
- return False
-
- if isinstance(labels, basestring):
- labels = yaml.safe_load(labels)
- if not isinstance(labels, dict):
- raise errors.AnsibleFilterError(
- "failed expected node labels to be a dict or serializable to a dict"
- )
- return label in labels and (value is None or labels[label] == value)
-
- return [n for n in nodes if label_filter(n)]
-
- @staticmethod
- def oo_parse_heat_stack_outputs(data):
- """ Formats the HEAT stack output into a usable form
-
- The goal is to transform something like this:
-
- +---------------+-------------------------------------------------+
- | Property | Value |
- +---------------+-------------------------------------------------+
- | capabilities | [] | |
- | creation_time | 2015-06-26T12:26:26Z | |
- | description | OpenShift cluster | |
- | … | … |
- | outputs | [ |
- | | { |
- | | "output_value": "value_A" |
- | | "description": "This is the value of Key_A" |
- | | "output_key": "Key_A" |
- | | }, |
- | | { |
- | | "output_value": [ |
- | | "value_B1", |
- | | "value_B2" |
- | | ], |
- | | "description": "This is the value of Key_B" |
- | | "output_key": "Key_B" |
- | | }, |
- | | ] |
- | parameters | { |
- | … | … |
- +---------------+-------------------------------------------------+
-
- into something like this:
-
- {
- "Key_A": "value_A",
- "Key_B": [
- "value_B1",
- "value_B2"
- ]
}
- """
+ """
+ if not isinstance(data, dict):
+ # pylint: disable=line-too-long
+ raise errors.AnsibleFilterError("|failed expects first param is a dict [oo_ec2_volume_def]. Got %s. Type: %s" % (str(data), str(type(data))))
+ if host_type not in ['master', 'node', 'etcd']:
+ raise errors.AnsibleFilterError("|failed expects etcd, master or node"
+ " as the host type")
+
+ root_vol = data[host_type]['root']
+ root_vol['device_name'] = '/dev/sda1'
+ root_vol['delete_on_termination'] = True
+ if root_vol['device_type'] != 'io1':
+ root_vol.pop('iops', None)
+ if host_type in ['master', 'node'] and 'docker' in data[host_type]:
+ docker_vol = data[host_type]['docker']
+ docker_vol['device_name'] = '/dev/xvdb'
+ docker_vol['delete_on_termination'] = True
+ if docker_vol['device_type'] != 'io1':
+ docker_vol.pop('iops', None)
+ if docker_ephemeral:
+ docker_vol.pop('device_type', None)
+ docker_vol.pop('delete_on_termination', None)
+ docker_vol['ephemeral'] = 'ephemeral0'
+ return [root_vol, docker_vol]
+ elif host_type == 'etcd' and 'etcd' in data[host_type]:
+ etcd_vol = data[host_type]['etcd']
+ etcd_vol['device_name'] = '/dev/xvdb'
+ etcd_vol['delete_on_termination'] = True
+ if etcd_vol['device_type'] != 'io1':
+ etcd_vol.pop('iops', None)
+ return [root_vol, etcd_vol]
+ return [root_vol]
+
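A small sketch of `oo_ec2_volume_definition` with an invented volume map; note that the filter also mutates the input dictionaries in place.

```python
# Invented volume map; assumes oo_ec2_volume_definition is in scope.
volumes = {'master': {'root': {'volume_size': 10, 'device_type': 'gp2', 'iops': 500},
                      'docker': {'volume_size': 40, 'device_type': 'gp2', 'iops': 500}}}

oo_ec2_volume_definition(volumes, 'master')
# [{'volume_size': 10, 'device_type': 'gp2',
#   'device_name': '/dev/sda1', 'delete_on_termination': True},
#  {'volume_size': 40, 'device_type': 'gp2',
#   'device_name': '/dev/xvdb', 'delete_on_termination': True}]
# ('iops' is dropped because the device_type is not 'io1')
```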
+
+def oo_split(string, separator=','):
+ """ This splits the input string into a list. If the input string is
+ already a list we will return it as is.
+ """
+ if isinstance(string, list):
+ return string
+ return string.split(separator)
+
+
+def oo_haproxy_backend_masters(hosts, port):
+ """ This takes an array of dicts and returns an array of dicts
+ to be used as a backend for the haproxy role
+ """
+ servers = []
+ for idx, host_info in enumerate(hosts):
+ server = dict(name="master%s" % idx)
+ server_ip = host_info['openshift']['common']['ip']
+ server['address'] = "%s:%s" % (server_ip, port)
+ server['opts'] = 'check'
+ servers.append(server)
+ return servers
+
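For illustration, `oo_haproxy_backend_masters` applied to two invented master hostvars entries:

```python
# Invented hostvars; assumes oo_haproxy_backend_masters is in scope.
masters = [{'openshift': {'common': {'ip': '192.168.0.10'}}},
           {'openshift': {'common': {'ip': '192.168.0.11'}}}]

oo_haproxy_backend_masters(masters, 8443)
# [{'name': 'master0', 'address': '192.168.0.10:8443', 'opts': 'check'},
#  {'name': 'master1', 'address': '192.168.0.11:8443', 'opts': 'check'}]
```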
+
+def oo_filter_list(data, filter_attr=None):
+ """ This returns a list, which contains all items where filter_attr
+ evaluates to true
+ Ex: data = [ { a: 1, b: True },
+ { a: 3, b: False },
+ { a: 5, b: True } ]
+ filter_attr = 'b'
+ returns [ { a: 1, b: True },
+ { a: 5, b: True } ]
+ """
+ if not isinstance(data, list):
+ raise errors.AnsibleFilterError("|failed expects to filter on a list")
+
+ if not isinstance(filter_attr, string_types):
+ raise errors.AnsibleFilterError("|failed expects filter_attr is a str or unicode")
+
+ # Gather up the values for the list of keys passed in
+ return [x for x in data if filter_attr in x and x[filter_attr]]
+
+
+def oo_nodes_with_label(nodes, label, value=None):
+ """ Filters a list of nodes by label and value (if provided)
+
+ It handles labels that are in the following variables by priority:
+ openshift_node_labels, cli_openshift_node_labels, openshift['node']['labels']
+
+ Examples:
+ data = ['a': {'openshift_node_labels': {'color': 'blue', 'size': 'M'}},
+ 'b': {'openshift_node_labels': {'color': 'green', 'size': 'L'}},
+ 'c': {'openshift_node_labels': {'size': 'S'}}]
+ label = 'color'
+ returns = ['a': {'openshift_node_labels': {'color': 'blue', 'size': 'M'}},
+ 'b': {'openshift_node_labels': {'color': 'green', 'size': 'L'}}]
+
+ data = ['a': {'openshift_node_labels': {'color': 'blue', 'size': 'M'}},
+ 'b': {'openshift_node_labels': {'color': 'green', 'size': 'L'}},
+ 'c': {'openshift_node_labels': {'size': 'S'}}]
+ label = 'color'
+ value = 'green'
+ returns = ['b': {'labels': {'color': 'green', 'size': 'L'}}]
+
+ Args:
+ nodes (list[dict]): list of node to node variables
+ label (str): label to filter `nodes` by
+ value (Optional[str]): value of `label` to filter by Defaults
+ to None.
+
+ Returns:
+ list[dict]: nodes filtered by label and value (if provided)
+ """
+ if not isinstance(nodes, list):
+ raise errors.AnsibleFilterError("failed expects to filter on a list")
+ if not isinstance(label, string_types):
+ raise errors.AnsibleFilterError("failed expects label to be a string")
+ if value is not None and not isinstance(value, string_types):
+ raise errors.AnsibleFilterError("failed expects value to be a string")
+
+ def label_filter(node):
+ """ filter function for testing if node should be returned """
+ if not isinstance(node, dict):
+ raise errors.AnsibleFilterError("failed expects to filter on a list of dicts")
+ if 'openshift_node_labels' in node:
+ labels = node['openshift_node_labels']
+ elif 'cli_openshift_node_labels' in node:
+ labels = node['cli_openshift_node_labels']
+ elif 'openshift' in node and 'node' in node['openshift'] and 'labels' in node['openshift']['node']:
+ labels = node['openshift']['node']['labels']
+ else:
+ return False
+
+ if isinstance(labels, string_types):
+ labels = yaml.safe_load(labels)
+ if not isinstance(labels, dict):
+ raise errors.AnsibleFilterError(
+ "failed expected node labels to be a dict or serializable to a dict"
+ )
+ return label in labels and (value is None or labels[label] == value)
+
+ return [n for n in nodes if label_filter(n)]
+
+
+def oo_parse_heat_stack_outputs(data):
+ """ Formats the HEAT stack output into a usable form
+
+ The goal is to transform something like this:
+
+ +---------------+-------------------------------------------------+
+ | Property | Value |
+ +---------------+-------------------------------------------------+
+ | capabilities | [] | |
+ | creation_time | 2015-06-26T12:26:26Z | |
+ | description | OpenShift cluster | |
+ | … | … |
+ | outputs | [ |
+ | | { |
+ | | "output_value": "value_A" |
+ | | "description": "This is the value of Key_A" |
+ | | "output_key": "Key_A" |
+ | | }, |
+ | | { |
+ | | "output_value": [ |
+ | | "value_B1", |
+ | | "value_B2" |
+ | | ], |
+ | | "description": "This is the value of Key_B" |
+ | | "output_key": "Key_B" |
+ | | }, |
+ | | ] |
+ | parameters | { |
+ | … | … |
+ +---------------+-------------------------------------------------+
+
+ into something like this:
+
+ {
+ "Key_A": "value_A",
+ "Key_B": [
+ "value_B1",
+ "value_B2"
+ ]
+ }
+ """
+
+ # Extract the “outputs” JSON snippet from the pretty-printed array
+ in_outputs = False
+ outputs = ''
+
+ line_regex = re.compile(r'\|\s*(.*?)\s*\|\s*(.*?)\s*\|')
+ for line in data['stdout_lines']:
+ match = line_regex.match(line)
+ if match:
+ if match.group(1) == 'outputs':
+ in_outputs = True
+ elif match.group(1) != '':
+ in_outputs = False
+ if in_outputs:
+ outputs += match.group(2)
+
+ outputs = json.loads(outputs)
+
+ # Revamp the “outputs” to put it in the form of a “Key: value” map
+ revamped_outputs = {}
+ for output in outputs:
+ revamped_outputs[output['output_key']] = output['output_value']
+
+ return revamped_outputs
+
+
+# pylint: disable=too-many-branches
+def oo_parse_named_certificates(certificates, named_certs_dir, internal_hostnames):
+ """ Parses names from list of certificate hashes.
+
+ Ex: certificates = [{ "certfile": "/root/custom1.crt",
+ "keyfile": "/root/custom1.key",
+ "cafile": "/root/custom-ca1.crt" },
+ { "certfile": "custom2.crt",
+ "keyfile": "custom2.key",
+ "cafile": "custom-ca2.crt" }]
+
+ returns [{ "certfile": "/etc/origin/master/named_certificates/custom1.crt",
+ "keyfile": "/etc/origin/master/named_certificates/custom1.key",
+ "cafile": "/etc/origin/master/named_certificates/custom-ca1.crt",
+ "names": [ "public-master-host.com",
+ "other-master-host.com" ] },
+ { "certfile": "/etc/origin/master/named_certificates/custom2.crt",
+ "keyfile": "/etc/origin/master/named_certificates/custom2.key",
+ "cafile": "/etc/origin/master/named_certificates/custom-ca-2.crt",
+ "names": [ "some-hostname.com" ] }]
+ """
+ if not isinstance(named_certs_dir, string_types):
+ raise errors.AnsibleFilterError("|failed expects named_certs_dir is str or unicode")
+
+ if not isinstance(internal_hostnames, list):
+ raise errors.AnsibleFilterError("|failed expects internal_hostnames is list")
+
+ if not HAS_OPENSSL:
+ raise errors.AnsibleFilterError("|missing OpenSSL python bindings")
+
+ for certificate in certificates:
+ if 'names' in certificate.keys():
+ continue
+ else:
+ certificate['names'] = []
- # Extract the “outputs” JSON snippet from the pretty-printed array
- in_outputs = False
- outputs = ''
-
- line_regex = re.compile(r'\|\s*(.*?)\s*\|\s*(.*?)\s*\|')
- for line in data['stdout_lines']:
- match = line_regex.match(line)
- if match:
- if match.group(1) == 'outputs':
- in_outputs = True
- elif match.group(1) != '':
- in_outputs = False
- if in_outputs:
- outputs += match.group(2)
-
- outputs = json.loads(outputs)
-
- # Revamp the “outputs” to put it in the form of a “Key: value” map
- revamped_outputs = {}
- for output in outputs:
- revamped_outputs[output['output_key']] = output['output_value']
-
- return revamped_outputs
-
- @staticmethod
- # pylint: disable=too-many-branches
- def oo_parse_named_certificates(certificates, named_certs_dir, internal_hostnames):
- """ Parses names from list of certificate hashes.
-
- Ex: certificates = [{ "certfile": "/root/custom1.crt",
- "keyfile": "/root/custom1.key",
- "cafile": "/root/custom-ca1.crt" },
- { "certfile": "custom2.crt",
- "keyfile": "custom2.key",
- "cafile": "custom-ca2.crt" }]
-
- returns [{ "certfile": "/etc/origin/master/named_certificates/custom1.crt",
- "keyfile": "/etc/origin/master/named_certificates/custom1.key",
- "cafile": "/etc/origin/master/named_certificates/custom-ca1.crt",
- "names": [ "public-master-host.com",
- "other-master-host.com" ] },
- { "certfile": "/etc/origin/master/named_certificates/custom2.crt",
- "keyfile": "/etc/origin/master/named_certificates/custom2.key",
- "cafile": "/etc/origin/master/named_certificates/custom-ca-2.crt",
- "names": [ "some-hostname.com" ] }]
- """
- if not isinstance(named_certs_dir, basestring):
- raise errors.AnsibleFilterError("|failed expects named_certs_dir is str or unicode")
-
- if not isinstance(internal_hostnames, list):
- raise errors.AnsibleFilterError("|failed expects internal_hostnames is list")
-
- if not HAS_OPENSSL:
- raise errors.AnsibleFilterError("|missing OpenSSL python bindings")
-
- for certificate in certificates:
- if 'names' in certificate.keys():
- continue
- else:
- certificate['names'] = []
-
- if not os.path.isfile(certificate['certfile']) or not os.path.isfile(certificate['keyfile']):
- raise errors.AnsibleFilterError("|certificate and/or key does not exist '%s', '%s'" %
- (certificate['certfile'], certificate['keyfile']))
-
- try:
- st_cert = open(certificate['certfile'], 'rt').read()
- cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, st_cert)
- certificate['names'].append(str(cert.get_subject().commonName.decode()))
- for i in range(cert.get_extension_count()):
- if cert.get_extension(i).get_short_name() == 'subjectAltName':
- for name in str(cert.get_extension(i)).replace('DNS:', '').split(', '):
- certificate['names'].append(name)
- except:
- raise errors.AnsibleFilterError(("|failed to parse certificate '%s', " % certificate['certfile'] +
- "please specify certificate names in host inventory"))
-
- certificate['names'] = list(set(certificate['names']))
- if 'cafile' not in certificate:
- certificate['names'] = [name for name in certificate['names'] if name not in internal_hostnames]
- if not certificate['names']:
- raise errors.AnsibleFilterError(("|failed to parse certificate '%s' or " % certificate['certfile'] +
- "detected a collision with internal hostname, please specify " +
- "certificate names in host inventory"))
-
- for certificate in certificates:
- # Update paths for configuration
- certificate['certfile'] = os.path.join(named_certs_dir, os.path.basename(certificate['certfile']))
- certificate['keyfile'] = os.path.join(named_certs_dir, os.path.basename(certificate['keyfile']))
- if 'cafile' in certificate:
- certificate['cafile'] = os.path.join(named_certs_dir, os.path.basename(certificate['cafile']))
- return certificates
-
- @staticmethod
- def oo_pretty_print_cluster(data, prefix='tag_'):
- """ Read a subset of hostvars and build a summary of the cluster
- in the following layout:
+ if not os.path.isfile(certificate['certfile']) or not os.path.isfile(certificate['keyfile']):
+ raise errors.AnsibleFilterError("|certificate and/or key does not exist '%s', '%s'" %
+ (certificate['certfile'], certificate['keyfile']))
+
+ try:
+ st_cert = open(certificate['certfile'], 'rt').read()
+ cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, st_cert)
+ certificate['names'].append(str(cert.get_subject().commonName.decode()))
+ for i in range(cert.get_extension_count()):
+ if cert.get_extension(i).get_short_name() == 'subjectAltName':
+ for name in str(cert.get_extension(i)).replace('DNS:', '').split(', '):
+ certificate['names'].append(name)
+ except Exception:
+ raise errors.AnsibleFilterError(("|failed to parse certificate '%s', " % certificate['certfile'] +
+ "please specify certificate names in host inventory"))
+
+ certificate['names'] = list(set(certificate['names']))
+ if 'cafile' not in certificate:
+ certificate['names'] = [name for name in certificate['names'] if name not in internal_hostnames]
+ if not certificate['names']:
+ raise errors.AnsibleFilterError(("|failed to parse certificate '%s' or " % certificate['certfile'] +
+ "detected a collision with internal hostname, please specify " +
+ "certificate names in host inventory"))
+
+ for certificate in certificates:
+ # Update paths for configuration
+ certificate['certfile'] = os.path.join(named_certs_dir, os.path.basename(certificate['certfile']))
+ certificate['keyfile'] = os.path.join(named_certs_dir, os.path.basename(certificate['keyfile']))
+ if 'cafile' in certificate:
+ certificate['cafile'] = os.path.join(named_certs_dir, os.path.basename(certificate['cafile']))
+ return certificates
+
+
+def oo_pretty_print_cluster(data, prefix='tag_'):
+ """ Read a subset of hostvars and build a summary of the cluster
+ in the following layout:
"c_id": {
- "master": {
- "default": [
- { "name": "c_id-master-12345", "public IP": "172.16.0.1", "private IP": "192.168.0.1" }
- ]
- "node": {
- "infra": [
- { "name": "c_id-node-infra-23456", "public IP": "172.16.0.2", "private IP": "192.168.0.2" }
- ],
- "compute": [
- { "name": "c_id-node-compute-23456", "public IP": "172.16.0.3", "private IP": "192.168.0.3" },
- ...
- ]
- }
+"master": {
+"default": [
+ { "name": "c_id-master-12345", "public IP": "172.16.0.1", "private IP": "192.168.0.1" }
+]
+"node": {
+"infra": [
+ { "name": "c_id-node-infra-23456", "public IP": "172.16.0.2", "private IP": "192.168.0.2" }
+],
+"compute": [
+ { "name": "c_id-node-compute-23456", "public IP": "172.16.0.3", "private IP": "192.168.0.3" },
+...
+]
+}
+ """
+
+ def _get_tag_value(tags, key):
+ """ Extract values of a map implemented as a set.
+ Ex: tags = { 'tag_foo_value1', 'tag_bar_value2', 'tag_baz_value3' }
+ key = 'bar'
+ returns 'value2'
"""
-
- def _get_tag_value(tags, key):
- """ Extract values of a map implemented as a set.
- Ex: tags = { 'tag_foo_value1', 'tag_bar_value2', 'tag_baz_value3' }
- key = 'bar'
- returns 'value2'
- """
- for tag in tags:
- if tag[:len(prefix) + len(key)] == prefix + key:
- return tag[len(prefix) + len(key) + 1:]
- raise KeyError(key)
-
- def _add_host(clusters,
- clusterid,
- host_type,
- sub_host_type,
- host):
- """ Add a new host in the clusters data structure """
- if clusterid not in clusters:
- clusters[clusterid] = {}
- if host_type not in clusters[clusterid]:
- clusters[clusterid][host_type] = {}
- if sub_host_type not in clusters[clusterid][host_type]:
- clusters[clusterid][host_type][sub_host_type] = []
- clusters[clusterid][host_type][sub_host_type].append(host)
-
- clusters = {}
- for host in data:
- try:
- _add_host(clusters=clusters,
- clusterid=_get_tag_value(host['group_names'], 'clusterid'),
- host_type=_get_tag_value(host['group_names'], 'host-type'),
- sub_host_type=_get_tag_value(host['group_names'], 'sub-host-type'),
- host={'name': host['inventory_hostname'],
- 'public IP': host['oo_public_ipv4'],
- 'private IP': host['oo_private_ipv4']})
- except KeyError:
- pass
- return clusters
-
- @staticmethod
- def oo_generate_secret(num_bytes):
- """ generate a session secret """
-
- if not isinstance(num_bytes, int):
- raise errors.AnsibleFilterError("|failed expects num_bytes is int")
-
- secret = os.urandom(num_bytes)
- return secret.encode('base-64').strip()
-
- @staticmethod
- def to_padded_yaml(data, level=0, indent=2, **kw):
- """ returns a yaml snippet padded to match the indent level you specify """
- if data in [None, ""]:
- return ""
-
+ for tag in tags:
+ if tag[:len(prefix) + len(key)] == prefix + key:
+ return tag[len(prefix) + len(key) + 1:]
+ raise KeyError(key)
+
+ def _add_host(clusters,
+ clusterid,
+ host_type,
+ sub_host_type,
+ host):
+ """ Add a new host in the clusters data structure """
+ if clusterid not in clusters:
+ clusters[clusterid] = {}
+ if host_type not in clusters[clusterid]:
+ clusters[clusterid][host_type] = {}
+ if sub_host_type not in clusters[clusterid][host_type]:
+ clusters[clusterid][host_type][sub_host_type] = []
+ clusters[clusterid][host_type][sub_host_type].append(host)
+
+ clusters = {}
+ for host in data:
try:
- transformed = yaml.dump(data, indent=indent, allow_unicode=True,
- default_flow_style=False,
- Dumper=AnsibleDumper, **kw)
- padded = "\n".join([" " * level * indent + line for line in transformed.splitlines()])
- return to_text("\n{0}".format(padded))
- except Exception as my_e:
- raise errors.AnsibleFilterError('Failed to convert: %s' % my_e)
-
- @staticmethod
- def oo_openshift_env(hostvars):
- ''' Return facts which begin with "openshift_" and translate
- legacy facts to their openshift_env counterparts.
-
- Ex: hostvars = {'openshift_fact': 42,
- 'theyre_taking_the_hobbits_to': 'isengard'}
- returns = {'openshift_fact': 42}
- '''
- if not issubclass(type(hostvars), dict):
- raise errors.AnsibleFilterError("|failed expects hostvars is a dict")
-
- facts = {}
- regex = re.compile('^openshift_.*')
- for key in hostvars:
- if regex.match(key):
- facts[key] = hostvars[key]
-
- migrations = {'openshift_router_selector': 'openshift_hosted_router_selector',
- 'openshift_registry_selector': 'openshift_hosted_registry_selector'}
- for old_fact, new_fact in migrations.iteritems():
- if old_fact in facts and new_fact not in facts:
- facts[new_fact] = facts[old_fact]
- return facts
-
- @staticmethod
- # pylint: disable=too-many-branches, too-many-nested-blocks
- def oo_persistent_volumes(hostvars, groups, persistent_volumes=None):
- """ Generate list of persistent volumes based on oo_openshift_env
- storage options set in host variables.
- """
- if not issubclass(type(hostvars), dict):
- raise errors.AnsibleFilterError("|failed expects hostvars is a dict")
- if not issubclass(type(groups), dict):
- raise errors.AnsibleFilterError("|failed expects groups is a dict")
- if persistent_volumes is not None and not issubclass(type(persistent_volumes), list):
- raise errors.AnsibleFilterError("|failed expects persistent_volumes is a list")
-
- if persistent_volumes is None:
- persistent_volumes = []
- if 'hosted' in hostvars['openshift']:
- for component in hostvars['openshift']['hosted']:
- if 'storage' in hostvars['openshift']['hosted'][component]:
- params = hostvars['openshift']['hosted'][component]['storage']
- kind = params['kind']
- create_pv = params['create_pv']
- if kind is not None and create_pv:
- if kind == 'nfs':
- host = params['host']
- if host is None:
- if 'oo_nfs_to_config' in groups and len(groups['oo_nfs_to_config']) > 0:
- host = groups['oo_nfs_to_config'][0]
- else:
- raise errors.AnsibleFilterError("|failed no storage host detected")
- directory = params['nfs']['directory']
- volume = params['volume']['name']
- path = directory + '/' + volume
- size = params['volume']['size']
- access_modes = params['access']['modes']
- persistent_volume = dict(
- name="{0}-volume".format(volume),
- capacity=size,
- access_modes=access_modes,
- storage=dict(
- nfs=dict(
- server=host,
- path=path)))
- persistent_volumes.append(persistent_volume)
- elif kind == 'openstack':
- volume = params['volume']['name']
- size = params['volume']['size']
- access_modes = params['access']['modes']
- filesystem = params['openstack']['filesystem']
- volume_id = params['openstack']['volumeID']
- persistent_volume = dict(
- name="{0}-volume".format(volume),
- capacity=size,
- access_modes=access_modes,
- storage=dict(
- cinder=dict(
- fsType=filesystem,
- volumeID=volume_id)))
- persistent_volumes.append(persistent_volume)
- elif not (kind == 'object' or kind == 'dynamic'):
- msg = "|failed invalid storage kind '{0}' for component '{1}'".format(
- kind,
- component)
- raise errors.AnsibleFilterError(msg)
- return persistent_volumes
-
- @staticmethod
- def oo_persistent_volume_claims(hostvars, persistent_volume_claims=None):
- """ Generate list of persistent volume claims based on oo_openshift_env
- storage options set in host variables.
- """
- if not issubclass(type(hostvars), dict):
- raise errors.AnsibleFilterError("|failed expects hostvars is a dict")
- if persistent_volume_claims is not None and not issubclass(type(persistent_volume_claims), list):
- raise errors.AnsibleFilterError("|failed expects persistent_volume_claims is a list")
-
- if persistent_volume_claims is None:
- persistent_volume_claims = []
- if 'hosted' in hostvars['openshift']:
- for component in hostvars['openshift']['hosted']:
- if 'storage' in hostvars['openshift']['hosted'][component]:
- params = hostvars['openshift']['hosted'][component]['storage']
- kind = params['kind']
- create_pv = params['create_pv']
- create_pvc = params['create_pvc']
- if kind not in [None, 'object'] and create_pv and create_pvc:
+ _add_host(clusters=clusters,
+ clusterid=_get_tag_value(host['group_names'], 'clusterid'),
+ host_type=_get_tag_value(host['group_names'], 'host-type'),
+ sub_host_type=_get_tag_value(host['group_names'], 'sub-host-type'),
+ host={'name': host['inventory_hostname'],
+ 'public IP': host['oo_public_ipv4'],
+ 'private IP': host['oo_private_ipv4']})
+ except KeyError:
+ pass
+ return clusters
+
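A minimal sketch of `oo_pretty_print_cluster` with one invented host whose group names carry the `tag_`-prefixed cluster metadata:

```python
# Invented host record; assumes oo_pretty_print_cluster is in scope.
hosts = [{'inventory_hostname': 'c1-master-12345',
          'oo_public_ipv4': '172.16.0.1',
          'oo_private_ipv4': '192.168.0.1',
          'group_names': ['tag_clusterid_c1',
                          'tag_host-type_master',
                          'tag_sub-host-type_default']}]

oo_pretty_print_cluster(hosts)
# {'c1': {'master': {'default': [{'name': 'c1-master-12345',
#                                 'public IP': '172.16.0.1',
#                                 'private IP': '192.168.0.1'}]}}}
```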
+
+def oo_generate_secret(num_bytes):
+ """ generate a session secret """
+
+ if not isinstance(num_bytes, int):
+ raise errors.AnsibleFilterError("|failed expects num_bytes is int")
+
+ secret = os.urandom(num_bytes)
+ return secret.encode('base-64').strip()
+
+
+def to_padded_yaml(data, level=0, indent=2, **kw):
+ """ returns a yaml snippet padded to match the indent level you specify """
+ if data in [None, ""]:
+ return ""
+
+ try:
+ transformed = yaml.dump(data, indent=indent, allow_unicode=True,
+ default_flow_style=False,
+ Dumper=AnsibleDumper, **kw)
+ padded = "\n".join([" " * level * indent + line for line in transformed.splitlines()])
+ return to_text("\n{0}".format(padded))
+ except Exception as my_e:
+ raise errors.AnsibleFilterError('Failed to convert: %s' % my_e)
+
+
+def oo_openshift_env(hostvars):
+ ''' Return facts which begin with "openshift_" and translate
+ legacy facts to their openshift_env counterparts.
+
+ Ex: hostvars = {'openshift_fact': 42,
+ 'theyre_taking_the_hobbits_to': 'isengard'}
+ returns = {'openshift_fact': 42}
+ '''
+ if not issubclass(type(hostvars), dict):
+ raise errors.AnsibleFilterError("|failed expects hostvars is a dict")
+
+ facts = {}
+ regex = re.compile('^openshift_.*')
+ for key in hostvars:
+ if regex.match(key):
+ facts[key] = hostvars[key]
+
+ migrations = {'openshift_router_selector': 'openshift_hosted_router_selector',
+ 'openshift_registry_selector': 'openshift_hosted_registry_selector'}
+ for old_fact, new_fact in migrations.items():
+ if old_fact in facts and new_fact not in facts:
+ facts[new_fact] = facts[old_fact]
+ return facts
+
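For illustration, a sketch of the legacy-fact migration performed by `oo_openshift_env` (fact values invented):

```python
# Invented facts; assumes oo_openshift_env is in scope.
oo_openshift_env({'openshift_registry_selector': 'region=infra',
                  'ansible_host': 'master1.example.com'})
# {'openshift_registry_selector': 'region=infra',
#  'openshift_hosted_registry_selector': 'region=infra'}
# Non-openshift_* keys are dropped; legacy selector facts gain their
# openshift_hosted_* counterparts when those are not already set.
```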
+
+# pylint: disable=too-many-branches, too-many-nested-blocks
+def oo_persistent_volumes(hostvars, groups, persistent_volumes=None):
+ """ Generate list of persistent volumes based on oo_openshift_env
+ storage options set in host variables.
+ """
+ if not issubclass(type(hostvars), dict):
+ raise errors.AnsibleFilterError("|failed expects hostvars is a dict")
+ if not issubclass(type(groups), dict):
+ raise errors.AnsibleFilterError("|failed expects groups is a dict")
+ if persistent_volumes is not None and not issubclass(type(persistent_volumes), list):
+ raise errors.AnsibleFilterError("|failed expects persistent_volumes is a list")
+
+ if persistent_volumes is None:
+ persistent_volumes = []
+ if 'hosted' in hostvars['openshift']:
+ for component in hostvars['openshift']['hosted']:
+ if 'storage' in hostvars['openshift']['hosted'][component]:
+ params = hostvars['openshift']['hosted'][component]['storage']
+ kind = params['kind']
+ create_pv = params['create_pv']
+ if kind is not None and create_pv:
+ if kind == 'nfs':
+ host = params['host']
+ if host is None:
+ if 'oo_nfs_to_config' in groups and len(groups['oo_nfs_to_config']) > 0:
+ host = groups['oo_nfs_to_config'][0]
+ else:
+ raise errors.AnsibleFilterError("|failed no storage host detected")
+ directory = params['nfs']['directory']
volume = params['volume']['name']
+ path = directory + '/' + volume
size = params['volume']['size']
access_modes = params['access']['modes']
- persistent_volume_claim = dict(
- name="{0}-claim".format(volume),
+ persistent_volume = dict(
+ name="{0}-volume".format(volume),
capacity=size,
- access_modes=access_modes)
- persistent_volume_claims.append(persistent_volume_claim)
- return persistent_volume_claims
-
- @staticmethod
- def oo_31_rpm_rename_conversion(rpms, openshift_version=None):
- """ Filters a list of 3.0 rpms and return the corresponding 3.1 rpms
- names with proper version (if provided)
-
- If 3.1 rpms are passed in they will only be augmented with the
- correct version. This is important for hosts that are running both
- Masters and Nodes.
- """
- if not isinstance(rpms, list):
- raise errors.AnsibleFilterError("failed expects to filter on a list")
- if openshift_version is not None and not isinstance(openshift_version, basestring):
- raise errors.AnsibleFilterError("failed expects openshift_version to be a string")
-
- rpms_31 = []
- for rpm in rpms:
- if 'atomic' not in rpm:
- rpm = rpm.replace("openshift", "atomic-openshift")
- if openshift_version:
- rpm = rpm + openshift_version
- rpms_31.append(rpm)
-
- return rpms_31
-
- @staticmethod
- def oo_pods_match_component(pods, deployment_type, component):
- """ Filters a list of Pods and returns the ones matching the deployment_type and component
- """
- if not isinstance(pods, list):
- raise errors.AnsibleFilterError("failed expects to filter on a list")
- if not isinstance(deployment_type, basestring):
- raise errors.AnsibleFilterError("failed expects deployment_type to be a string")
- if not isinstance(component, basestring):
- raise errors.AnsibleFilterError("failed expects component to be a string")
-
- image_prefix = 'openshift/origin-'
- if deployment_type in ['enterprise', 'online', 'openshift-enterprise']:
- image_prefix = 'openshift3/ose-'
- elif deployment_type == 'atomic-enterprise':
- image_prefix = 'aep3_beta/aep-'
-
- matching_pods = []
- image_regex = image_prefix + component + r'.*'
- for pod in pods:
- for container in pod['spec']['containers']:
- if re.search(image_regex, container['image']):
- matching_pods.append(pod)
- break # stop here, don't add a pod more than once
-
- return matching_pods
-
- @staticmethod
- def oo_get_hosts_from_hostvars(hostvars, hosts):
- """ Return a list of hosts from hostvars """
- retval = []
- for host in hosts:
- try:
- retval.append(hostvars[host])
- except errors.AnsibleError:
- # host does not exist
- pass
-
- return retval
-
- @staticmethod
- def oo_image_tag_to_rpm_version(version, include_dash=False):
- """ Convert an image tag string to an RPM version if necessary
- Empty strings and strings that are already in rpm version format
- are ignored. Also remove non semantic version components.
-
- Ex. v3.2.0.10 -> -3.2.0.10
- v1.2.0-rc1 -> -1.2.0
- """
- if not isinstance(version, basestring):
- raise errors.AnsibleFilterError("|failed expects a string or unicode")
- if version.startswith("v"):
- version = version[1:]
- # Strip release from requested version, we no longer support this.
- version = version.split('-')[0]
+ access_modes=access_modes,
+ storage=dict(
+ nfs=dict(
+ server=host,
+ path=path)))
+ persistent_volumes.append(persistent_volume)
+ elif kind == 'openstack':
+ volume = params['volume']['name']
+ size = params['volume']['size']
+ access_modes = params['access']['modes']
+ filesystem = params['openstack']['filesystem']
+ volume_id = params['openstack']['volumeID']
+ persistent_volume = dict(
+ name="{0}-volume".format(volume),
+ capacity=size,
+ access_modes=access_modes,
+ storage=dict(
+ cinder=dict(
+ fsType=filesystem,
+ volumeID=volume_id)))
+ persistent_volumes.append(persistent_volume)
+ elif not (kind == 'object' or kind == 'dynamic'):
+ msg = "|failed invalid storage kind '{0}' for component '{1}'".format(
+ kind,
+ component)
+ raise errors.AnsibleFilterError(msg)
+ return persistent_volumes
+
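A condensed sketch of the NFS branch of `oo_persistent_volumes`, with an invented hosted-registry storage configuration and NFS group:

```python
# Invented storage configuration; assumes oo_persistent_volumes is in scope.
hostvars = {'openshift': {'hosted': {'registry': {'storage': {
    'kind': 'nfs', 'create_pv': True, 'host': None,
    'nfs': {'directory': '/exports'},
    'volume': {'name': 'registry', 'size': '10Gi'},
    'access': {'modes': ['ReadWriteMany']}}}}}}
groups = {'oo_nfs_to_config': ['nfs1.example.com']}

oo_persistent_volumes(hostvars, groups)
# [{'name': 'registry-volume', 'capacity': '10Gi',
#   'access_modes': ['ReadWriteMany'],
#   'storage': {'nfs': {'server': 'nfs1.example.com',
#                       'path': '/exports/registry'}}}]
```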
+
+def oo_persistent_volume_claims(hostvars, persistent_volume_claims=None):
+ """ Generate list of persistent volume claims based on oo_openshift_env
+ storage options set in host variables.
+ """
+ if not issubclass(type(hostvars), dict):
+ raise errors.AnsibleFilterError("|failed expects hostvars is a dict")
+ if persistent_volume_claims is not None and not issubclass(type(persistent_volume_claims), list):
+ raise errors.AnsibleFilterError("|failed expects persistent_volume_claims is a list")
+
+ if persistent_volume_claims is None:
+ persistent_volume_claims = []
+ if 'hosted' in hostvars['openshift']:
+ for component in hostvars['openshift']['hosted']:
+ if 'storage' in hostvars['openshift']['hosted'][component]:
+ params = hostvars['openshift']['hosted'][component]['storage']
+ kind = params['kind']
+ create_pv = params['create_pv']
+ create_pvc = params['create_pvc']
+ if kind not in [None, 'object'] and create_pv and create_pvc:
+ volume = params['volume']['name']
+ size = params['volume']['size']
+ access_modes = params['access']['modes']
+ persistent_volume_claim = dict(
+ name="{0}-claim".format(volume),
+ capacity=size,
+ access_modes=access_modes)
+ persistent_volume_claims.append(persistent_volume_claim)
+ return persistent_volume_claims
+
+
+def oo_31_rpm_rename_conversion(rpms, openshift_version=None):
+ """ Filters a list of 3.0 rpms and return the corresponding 3.1 rpms
+ names with proper version (if provided)
+
+ If 3.1 rpms are passed in they will only be augmented with the
+ correct version. This is important for hosts that are running both
+ Masters and Nodes.
+ """
+ if not isinstance(rpms, list):
+ raise errors.AnsibleFilterError("failed expects to filter on a list")
+ if openshift_version is not None and not isinstance(openshift_version, string_types):
+ raise errors.AnsibleFilterError("failed expects openshift_version to be a string")
+
+ rpms_31 = []
+ for rpm in rpms:
+ if 'atomic' not in rpm:
+ rpm = rpm.replace("openshift", "atomic-openshift")
+ if openshift_version:
+ rpm = rpm + openshift_version
+ rpms_31.append(rpm)
+
+ return rpms_31
+
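A short sketch of `oo_31_rpm_rename_conversion` with an invented package list and version suffix:

```python
# Invented package names and version; assumes oo_31_rpm_rename_conversion is in scope.
oo_31_rpm_rename_conversion(['openshift', 'openshift-node'], '-3.1.1.6')
# ['atomic-openshift-3.1.1.6', 'atomic-openshift-node-3.1.1.6']
```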
+
+def oo_pods_match_component(pods, deployment_type, component):
+ """ Filters a list of Pods and returns the ones matching the deployment_type and component
+ """
+ if not isinstance(pods, list):
+ raise errors.AnsibleFilterError("failed expects to filter on a list")
+ if not isinstance(deployment_type, string_types):
+ raise errors.AnsibleFilterError("failed expects deployment_type to be a string")
+ if not isinstance(component, string_types):
+ raise errors.AnsibleFilterError("failed expects component to be a string")
+
+ image_prefix = 'openshift/origin-'
+ if deployment_type in ['enterprise', 'online', 'openshift-enterprise']:
+ image_prefix = 'openshift3/ose-'
+ elif deployment_type == 'atomic-enterprise':
+ image_prefix = 'aep3_beta/aep-'
+
+ matching_pods = []
+ image_regex = image_prefix + component + r'.*'
+ for pod in pods:
+ for container in pod['spec']['containers']:
+ if re.search(image_regex, container['image']):
+ matching_pods.append(pod)
+ break # stop here, don't add a pod more than once
+
+ return matching_pods
+
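For illustration, `oo_pods_match_component` against two invented pod definitions; the enterprise deployment types switch the image prefix to `openshift3/ose-`:

```python
# Invented pod specs; assumes oo_pods_match_component is in scope.
pods = [{'spec': {'containers': [{'image': 'openshift3/ose-haproxy-router:v3.3.1.3'}]}},
        {'spec': {'containers': [{'image': 'openshift3/ose-docker-registry:v3.3.1.3'}]}}]

oo_pods_match_component(pods, 'openshift-enterprise', 'haproxy-router')
# returns only the first pod (its image matches 'openshift3/ose-haproxy-router.*')
```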
+
+def oo_get_hosts_from_hostvars(hostvars, hosts):
+ """ Return a list of hosts from hostvars """
+ retval = []
+ for host in hosts:
+ try:
+ retval.append(hostvars[host])
+ except errors.AnsibleError:
+ # host does not exist
+ pass
+
+ return retval
+
+
+def oo_image_tag_to_rpm_version(version, include_dash=False):
+ """ Convert an image tag string to an RPM version if necessary
+ Empty strings and strings that are already in rpm version format
+ are ignored. Non-semantic version components (e.g. an "-rc1" release
+ suffix) are also removed.
+
+ Ex. (with include_dash=True): v3.2.0.10 -> -3.2.0.10
+ v1.2.0-rc1 -> -1.2.0
+ """
+ if not isinstance(version, string_types):
+ raise errors.AnsibleFilterError("|failed expects a string or unicode")
+ if version.startswith("v"):
+ version = version[1:]
+ # Strip release from requested version, we no longer support this.
+ version = version.split('-')[0]
+
+ if include_dash and version and not version.startswith("-"):
+ version = "-" + version
+
+ return version
+
+
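Two small checks of the conversion, one per include_dash mode:

    from filter_plugins.oo_filters import oo_image_tag_to_rpm_version

    print(oo_image_tag_to_rpm_version('v3.2.0.10'))                       # 3.2.0.10
    print(oo_image_tag_to_rpm_version('v1.2.0-rc1', include_dash=True))   # -1.2.0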
+def oo_hostname_from_url(url):
+ """ Returns the hostname contained in a URL
+
+ Ex: https://ose3-master.example.com/v1/api -> ose3-master.example.com
+ """
+ if not isinstance(url, string_types):
+ raise errors.AnsibleFilterError("|failed expects a string or unicode")
+ parse_result = urlparse(url)
+ if parse_result.netloc != '':
+ return parse_result.netloc
+ else:
+ # netloc wasn't parsed, assume url was missing scheme and path
+ return parse_result.path
+
+
+# pylint: disable=invalid-name, unused-argument
+def oo_openshift_loadbalancer_frontends(
+ api_port, servers_hostvars, use_nuage=False, nuage_rest_port=None):
+ """TODO: Document me."""
+ loadbalancer_frontends = [{'name': 'atomic-openshift-api',
+ 'mode': 'tcp',
+ 'options': ['tcplog'],
+ 'binds': ["*:{0}".format(api_port)],
+ 'default_backend': 'atomic-openshift-api'}]
+ if bool(strtobool(str(use_nuage))) and nuage_rest_port is not None:
+ loadbalancer_frontends.append({'name': 'nuage-monitor',
+ 'mode': 'tcp',
+ 'options': ['tcplog'],
+ 'binds': ["*:{0}".format(nuage_rest_port)],
+ 'default_backend': 'nuage-monitor'})
+ return loadbalancer_frontends
+
+
+# pylint: disable=invalid-name
+def oo_openshift_loadbalancer_backends(
+ api_port, servers_hostvars, use_nuage=False, nuage_rest_port=None):
+ """TODO: Document me."""
+ loadbalancer_backends = [{'name': 'atomic-openshift-api',
+ 'mode': 'tcp',
+ 'option': 'tcplog',
+ 'balance': 'source',
+ 'servers': oo_haproxy_backend_masters(servers_hostvars, api_port)}]
+ if bool(strtobool(str(use_nuage))) and nuage_rest_port is not None:
+ # pylint: disable=line-too-long
+ loadbalancer_backends.append({'name': 'nuage-monitor',
+ 'mode': 'tcp',
+ 'option': 'tcplog',
+ 'balance': 'source',
+ 'servers': oo_haproxy_backend_masters(servers_hostvars, nuage_rest_port)})
+ return loadbalancer_backends
+
+
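For reference, a sketch of the frontend structure this filter returns; servers_hostvars is unused for frontends, so an empty dict stands in here:

    from filter_plugins.oo_filters import oo_openshift_loadbalancer_frontends

    fe = oo_openshift_loadbalancer_frontends(8443, {}, use_nuage=True, nuage_rest_port=9443)
    # [{'name': 'atomic-openshift-api', 'mode': 'tcp', 'options': ['tcplog'],
    #   'binds': ['*:8443'], 'default_backend': 'atomic-openshift-api'},
    #  {'name': 'nuage-monitor', 'mode': 'tcp', 'options': ['tcplog'],
    #   'binds': ['*:9443'], 'default_backend': 'nuage-monitor'}]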
+def oo_chomp_commit_offset(version):
+ """Chomp any "+git.foo" commit offset string from the given `version`
+ and return the modified version string.
+
+Ex:
+- chomp_commit_offset(None) => None
+- chomp_commit_offset(1337) => "1337"
+- chomp_commit_offset("v3.4.0.15+git.derp") => "v3.4.0.15"
+- chomp_commit_offset("v3.4.0.15") => "v3.4.0.15"
+- chomp_commit_offset("v1.3.0+52492b4") => "v1.3.0"
+ """
+ if version is None:
+ return version
+ else:
+ # Stringify, just in case it's a Number type. Split by '+' and
+ # return the first piece. Strings without a '+' are safe too:
+ # .split() then returns a single-element list holding the original string.
+ return str(version).split('+')[0]
- if include_dash and version and not version.startswith("-"):
- version = "-" + version
- return version
+def oo_random_word(length, source='abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'):
+ """Generates a random string of given length from a set of alphanumeric characters.
+ The default source uses [a-z][A-Z][0-9]
+ Ex:
+ - oo_random_word(3) => aB9
+ - oo_random_word(4, source='012') => 0120
+ """
+ return ''.join(random.choice(source) for i in range(length))
- @staticmethod
- def oo_hostname_from_url(url):
- """ Returns the hostname contained in a URL
- Ex: https://ose3-master.example.com/v1/api -> ose3-master.example.com
- """
- if not isinstance(url, basestring):
- raise errors.AnsibleFilterError("|failed expects a string or unicode")
- parse_result = urlparse(url)
- if parse_result.netloc != '':
- return parse_result.netloc
- else:
- # netloc wasn't parsed, assume url was missing scheme and path
- return parse_result.path
-
- # pylint: disable=invalid-name, missing-docstring, unused-argument
- @staticmethod
- def oo_openshift_loadbalancer_frontends(api_port, servers_hostvars, use_nuage=False, nuage_rest_port=None):
- loadbalancer_frontends = [{'name': 'atomic-openshift-api',
- 'mode': 'tcp',
- 'options': ['tcplog'],
- 'binds': ["*:{0}".format(api_port)],
- 'default_backend': 'atomic-openshift-api'}]
- if bool(strtobool(str(use_nuage))) and nuage_rest_port is not None:
- loadbalancer_frontends.append({'name': 'nuage-monitor',
- 'mode': 'tcp',
- 'options': ['tcplog'],
- 'binds': ["*:{0}".format(nuage_rest_port)],
- 'default_backend': 'nuage-monitor'})
- return loadbalancer_frontends
-
- # pylint: disable=invalid-name, missing-docstring
- @staticmethod
- def oo_openshift_loadbalancer_backends(api_port, servers_hostvars, use_nuage=False, nuage_rest_port=None):
- loadbalancer_backends = [{'name': 'atomic-openshift-api',
- 'mode': 'tcp',
- 'option': 'tcplog',
- 'balance': 'source',
- 'servers': FilterModule.oo_haproxy_backend_masters(servers_hostvars, api_port)}]
- if bool(strtobool(str(use_nuage))) and nuage_rest_port is not None:
- # pylint: disable=line-too-long
- loadbalancer_backends.append({'name': 'nuage-monitor',
- 'mode': 'tcp',
- 'option': 'tcplog',
- 'balance': 'source',
- 'servers': FilterModule.oo_haproxy_backend_masters(servers_hostvars, nuage_rest_port)})
- return loadbalancer_backends
-
- @staticmethod
- def oo_chomp_commit_offset(version):
- """Chomp any "+git.foo" commit offset string from the given `version`
- and return the modified version string.
-
- Ex:
- - chomp_commit_offset(None) => None
- - chomp_commit_offset(1337) => "1337"
- - chomp_commit_offset("v3.4.0.15+git.derp") => "v3.4.0.15"
- - chomp_commit_offset("v3.4.0.15") => "v3.4.0.15"
- - chomp_commit_offset("v1.3.0+52492b4") => "v1.3.0"
- """
- if version is None:
- return version
- else:
- # Stringify, just in case it's a Number type. Split by '+' and
- # return the first split. No concerns about strings without a
- # '+', .split() returns an array of the original string.
- return str(version).split('+')[0]
+class FilterModule(object):
+ """ Custom ansible filter mapping """
+ # pylint: disable=no-self-use, too-few-public-methods
def filters(self):
""" returns a mapping of filters to methods """
return {
- "oo_select_keys": self.oo_select_keys,
- "oo_select_keys_from_list": self.oo_select_keys_from_list,
- "oo_chomp_commit_offset": self.oo_chomp_commit_offset,
- "oo_collect": self.oo_collect,
- "oo_flatten": self.oo_flatten,
- "oo_pdb": self.oo_pdb,
- "oo_prepend_strings_in_list": self.oo_prepend_strings_in_list,
- "oo_ami_selector": self.oo_ami_selector,
- "oo_ec2_volume_definition": self.oo_ec2_volume_definition,
- "oo_combine_key_value": self.oo_combine_key_value,
- "oo_combine_dict": self.oo_combine_dict,
- "oo_split": self.oo_split,
- "oo_filter_list": self.oo_filter_list,
- "oo_parse_heat_stack_outputs": self.oo_parse_heat_stack_outputs,
- "oo_parse_named_certificates": self.oo_parse_named_certificates,
- "oo_haproxy_backend_masters": self.oo_haproxy_backend_masters,
- "oo_pretty_print_cluster": self.oo_pretty_print_cluster,
- "oo_generate_secret": self.oo_generate_secret,
- "to_padded_yaml": self.to_padded_yaml,
- "oo_nodes_with_label": self.oo_nodes_with_label,
- "oo_openshift_env": self.oo_openshift_env,
- "oo_persistent_volumes": self.oo_persistent_volumes,
- "oo_persistent_volume_claims": self.oo_persistent_volume_claims,
- "oo_31_rpm_rename_conversion": self.oo_31_rpm_rename_conversion,
- "oo_pods_match_component": self.oo_pods_match_component,
- "oo_get_hosts_from_hostvars": self.oo_get_hosts_from_hostvars,
- "oo_image_tag_to_rpm_version": self.oo_image_tag_to_rpm_version,
- "oo_merge_dicts": self.oo_merge_dicts,
- "oo_hostname_from_url": self.oo_hostname_from_url,
- "oo_merge_hostvars": self.oo_merge_hostvars,
- "oo_openshift_loadbalancer_frontends": self.oo_openshift_loadbalancer_frontends,
- "oo_openshift_loadbalancer_backends": self.oo_openshift_loadbalancer_backends
+ "oo_select_keys": oo_select_keys,
+ "oo_select_keys_from_list": oo_select_keys_from_list,
+ "oo_chomp_commit_offset": oo_chomp_commit_offset,
+ "oo_collect": oo_collect,
+ "oo_flatten": oo_flatten,
+ "oo_pdb": oo_pdb,
+ "oo_prepend_strings_in_list": oo_prepend_strings_in_list,
+ "oo_ami_selector": oo_ami_selector,
+ "oo_ec2_volume_definition": oo_ec2_volume_definition,
+ "oo_combine_key_value": oo_combine_key_value,
+ "oo_combine_dict": oo_combine_dict,
+ "oo_split": oo_split,
+ "oo_filter_list": oo_filter_list,
+ "oo_parse_heat_stack_outputs": oo_parse_heat_stack_outputs,
+ "oo_parse_named_certificates": oo_parse_named_certificates,
+ "oo_haproxy_backend_masters": oo_haproxy_backend_masters,
+ "oo_pretty_print_cluster": oo_pretty_print_cluster,
+ "oo_generate_secret": oo_generate_secret,
+ "oo_nodes_with_label": oo_nodes_with_label,
+ "oo_openshift_env": oo_openshift_env,
+ "oo_persistent_volumes": oo_persistent_volumes,
+ "oo_persistent_volume_claims": oo_persistent_volume_claims,
+ "oo_31_rpm_rename_conversion": oo_31_rpm_rename_conversion,
+ "oo_pods_match_component": oo_pods_match_component,
+ "oo_get_hosts_from_hostvars": oo_get_hosts_from_hostvars,
+ "oo_image_tag_to_rpm_version": oo_image_tag_to_rpm_version,
+ "oo_merge_dicts": oo_merge_dicts,
+ "oo_hostname_from_url": oo_hostname_from_url,
+ "oo_merge_hostvars": oo_merge_hostvars,
+ "oo_openshift_loadbalancer_frontends": oo_openshift_loadbalancer_frontends,
+ "oo_openshift_loadbalancer_backends": oo_openshift_loadbalancer_backends,
+ "to_padded_yaml": to_padded_yaml,
+ "oo_random_word": oo_random_word
}
diff --git a/filter_plugins/oo_zabbix_filters.py b/filter_plugins/oo_zabbix_filters.py
deleted file mode 100644
index 1c1854b29..000000000
--- a/filter_plugins/oo_zabbix_filters.py
+++ /dev/null
@@ -1,160 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# vim: expandtab:tabstop=4:shiftwidth=4
-'''
-Custom zabbix filters for use in openshift-ansible
-'''
-
-import pdb
-
-
-class FilterModule(object):
- ''' Custom zabbix ansible filters '''
-
- @staticmethod
- def create_data(data, results, key, new_key):
- '''Take a dict, filter through results and add results['key'] to dict
- '''
- new_list = [app[key] for app in results]
- data[new_key] = new_list
- return data
-
- @staticmethod
- def oo_set_zbx_trigger_triggerid(item, trigger_results):
- '''Set zabbix trigger id from trigger results
- '''
- if isinstance(trigger_results, list):
- item['triggerid'] = trigger_results[0]['triggerid']
- return item
-
- item['triggerid'] = trigger_results['triggerids'][0]
- return item
-
- @staticmethod
- def oo_set_zbx_item_hostid(item, template_results):
- ''' Set zabbix host id from template results
- '''
- if isinstance(template_results, list):
- item['hostid'] = template_results[0]['templateid']
- return item
-
- item['hostid'] = template_results['templateids'][0]
- return item
-
- @staticmethod
- def oo_pdb(arg):
- ''' This pops you into a pdb instance where arg is the data passed in
- from the filter.
- Ex: "{{ hostvars | oo_pdb }}"
- '''
- pdb.set_trace()
- return arg
-
- @staticmethod
- def select_by_name(ans_data, data):
- ''' test
- '''
- for zabbix_item in data:
- if ans_data['name'] == zabbix_item:
- data[zabbix_item]['params']['hostid'] = ans_data['templateid']
- return data[zabbix_item]['params']
- return None
-
- @staticmethod
- def oo_build_zabbix_collect(data, string, value):
- ''' Build a list of dicts from a list of data matched on string attribute
- '''
- rval = []
- for item in data:
- if item[string] == value:
- rval.append(item)
-
- return rval
-
- @staticmethod
- def oo_build_zabbix_list_dict(values, string):
- ''' Build a list of dicts with string as key for each value
- '''
- rval = []
- for value in values:
- rval.append({string: value})
- return rval
-
- @staticmethod
- def oo_remove_attr_from_list_dict(data, attr):
- ''' Remove a specific attribute from a dict
- '''
- attrs = []
- if isinstance(attr, str):
- attrs.append(attr)
- else:
- attrs = attr
-
- for attribute in attrs:
- for _entry in data:
- _entry.pop(attribute, None)
-
- return data
-
- @staticmethod
- def itservice_results_builder(data, clusters, keys):
- '''Take a list of dict results,
- loop through each results and create a hash
- of:
- [{clusterid: cluster1, key: 111 }]
- '''
- r_list = []
- for cluster in clusters:
- for results in data:
- if cluster == results['item'][0]:
- results = results['results']
- if results and len(results) > 0 and all([_key in results[0] for _key in keys]):
- tmp = {}
- tmp['clusterid'] = cluster
- for key in keys:
- tmp[key] = results[0][key]
- r_list.append(tmp)
-
- return r_list
-
- @staticmethod
- def itservice_dependency_builder(data, cluster):
- '''Take a list of dict results,
- loop through each results and create a hash
- of:
- [{clusterid: cluster1, key: 111 }]
- '''
- r_list = []
- for dep in data:
- if cluster == dep['clusterid']:
- r_list.append({'name': '%s - %s' % (dep['clusterid'], dep['description']), 'dep_type': 'hard'})
-
- return r_list
-
- @staticmethod
- def itservice_dep_builder_list(data):
- '''Take a list of dict results,
- loop through each results and create a hash
- of:
- [{clusterid: cluster1, key: 111 }]
- '''
- r_list = []
- for dep in data:
- r_list.append({'name': '%s' % dep, 'dep_type': 'hard'})
-
- return r_list
-
- def filters(self):
- ''' returns a mapping of filters to methods '''
- return {
- "select_by_name": self.select_by_name,
- "oo_set_zbx_item_hostid": self.oo_set_zbx_item_hostid,
- "oo_set_zbx_trigger_triggerid": self.oo_set_zbx_trigger_triggerid,
- "oo_build_zabbix_list_dict": self.oo_build_zabbix_list_dict,
- "create_data": self.create_data,
- "oo_build_zabbix_collect": self.oo_build_zabbix_collect,
- "oo_remove_attr_from_list_dict": self.oo_remove_attr_from_list_dict,
- "itservice_results_builder": self.itservice_results_builder,
- "itservice_dependency_builder": self.itservice_dependency_builder,
- "itservice_dep_builder_list": self.itservice_dep_builder_list,
- }
diff --git a/filter_plugins/openshift_master.py b/filter_plugins/openshift_master.py
index 57b1f7d82..f71d9b863 100644
--- a/filter_plugins/openshift_master.py
+++ b/filter_plugins/openshift_master.py
@@ -6,22 +6,14 @@ Custom filters for use in openshift-master
'''
import copy
import sys
-import yaml
+
+from distutils.version import LooseVersion # pylint: disable=no-name-in-module,import-error
from ansible import errors
+from ansible.plugins.filter.core import to_bool as ansible_bool
+from six import string_types
-# pylint: disable=no-name-in-module,import-error,wrong-import-order
-from distutils.version import LooseVersion
-try:
- # ansible-2.1
- from ansible.plugins.filter.core import to_bool as ansible_bool
-except ImportError:
- try:
- # ansible-2.0.x
- from ansible.runner.filter_plugins.core import bool as ansible_bool
- except ImportError:
- # ansible-1.9.x
- from ansible.plugins.filter.core import bool as ansible_bool
+import yaml
class IdentityProviderBase(object):
@@ -169,7 +161,7 @@ class LDAPPasswordIdentityProvider(IdentityProviderBase):
AnsibleFilterError:
"""
def __init__(self, api_version, idp):
- IdentityProviderBase.__init__(self, api_version, idp)
+ super(self.__class__, self).__init__(api_version, idp)
self._allow_additional = False
self._required += [['attributes'], ['url'], ['insecure']]
self._optional += [['ca'],
@@ -184,7 +176,6 @@ class LDAPPasswordIdentityProvider(IdentityProviderBase):
def validate(self):
''' validate this idp instance '''
- IdentityProviderBase.validate(self)
if not isinstance(self.provider['attributes'], dict):
raise errors.AnsibleFilterError("|failed attributes for provider "
"{0} must be a dictionary".format(self.__class__.__name__))
@@ -214,7 +205,7 @@ class KeystonePasswordIdentityProvider(IdentityProviderBase):
AnsibleFilterError:
"""
def __init__(self, api_version, idp):
- IdentityProviderBase.__init__(self, api_version, idp)
+ super(self.__class__, self).__init__(api_version, idp)
self._allow_additional = False
self._required += [['url'], ['domainName', 'domain_name']]
self._optional += [['ca'], ['certFile', 'cert_file'], ['keyFile', 'key_file']]
@@ -233,7 +224,7 @@ class RequestHeaderIdentityProvider(IdentityProviderBase):
AnsibleFilterError:
"""
def __init__(self, api_version, idp):
- IdentityProviderBase.__init__(self, api_version, idp)
+ super(self.__class__, self).__init__(api_version, idp)
self._allow_additional = False
self._required += [['headers']]
self._optional += [['challengeURL', 'challenge_url'],
@@ -246,7 +237,6 @@ class RequestHeaderIdentityProvider(IdentityProviderBase):
def validate(self):
''' validate this idp instance '''
- IdentityProviderBase.validate(self)
if not isinstance(self.provider['headers'], list):
raise errors.AnsibleFilterError("|failed headers for provider {0} "
"must be a list".format(self.__class__.__name__))
@@ -265,7 +255,7 @@ class AllowAllPasswordIdentityProvider(IdentityProviderBase):
AnsibleFilterError:
"""
def __init__(self, api_version, idp):
- IdentityProviderBase.__init__(self, api_version, idp)
+ super(self.__class__, self).__init__(api_version, idp)
self._allow_additional = False
@@ -282,7 +272,7 @@ class DenyAllPasswordIdentityProvider(IdentityProviderBase):
AnsibleFilterError:
"""
def __init__(self, api_version, idp):
- IdentityProviderBase.__init__(self, api_version, idp)
+ super(self.__class__, self).__init__(api_version, idp)
self._allow_additional = False
@@ -299,7 +289,7 @@ class HTPasswdPasswordIdentityProvider(IdentityProviderBase):
AnsibleFilterError:
"""
def __init__(self, api_version, idp):
- IdentityProviderBase.__init__(self, api_version, idp)
+ super(self.__class__, self).__init__(api_version, idp)
self._allow_additional = False
self._required += [['file', 'filename', 'fileName', 'file_name']]
@@ -324,7 +314,7 @@ class BasicAuthPasswordIdentityProvider(IdentityProviderBase):
AnsibleFilterError:
"""
def __init__(self, api_version, idp):
- IdentityProviderBase.__init__(self, api_version, idp)
+ super(self.__class__, self).__init__(api_version, idp)
self._allow_additional = False
self._required += [['url']]
self._optional += [['ca'], ['certFile', 'cert_file'], ['keyFile', 'key_file']]
@@ -343,13 +333,12 @@ class IdentityProviderOauthBase(IdentityProviderBase):
AnsibleFilterError:
"""
def __init__(self, api_version, idp):
- IdentityProviderBase.__init__(self, api_version, idp)
+ super(self.__class__, self).__init__(api_version, idp)
self._allow_additional = False
self._required += [['clientID', 'client_id'], ['clientSecret', 'client_secret']]
def validate(self):
''' validate this idp instance '''
- IdentityProviderBase.validate(self)
if self.challenge:
raise errors.AnsibleFilterError("|failed provider {0} does not "
"allow challenge authentication".format(self.__class__.__name__))
@@ -513,7 +502,7 @@ class FilterModule(object):
'master3.example.com']
returns True
'''
- if not issubclass(type(data), basestring):
+ if not issubclass(type(data), string_types):
raise errors.AnsibleFilterError("|failed expects data is a string or unicode")
if not issubclass(type(masters), list):
raise errors.AnsibleFilterError("|failed expects masters is a list")
@@ -528,7 +517,9 @@ class FilterModule(object):
''' Return certificates to synchronize based on facts. '''
if not issubclass(type(hostvars), dict):
raise errors.AnsibleFilterError("|failed expects hostvars is a dict")
- certs = ['admin.crt',
+ certs = ['ca.crt',
+ 'ca.key',
+ 'admin.crt',
'admin.key',
'admin.kubeconfig',
'master.kubelet-client.crt',
@@ -558,7 +549,7 @@ class FilterModule(object):
def oo_htpasswd_users_from_file(file_contents):
''' return a dictionary of htpasswd users from htpasswd file contents '''
htpasswd_entries = {}
- if not isinstance(file_contents, basestring):
+ if not isinstance(file_contents, string_types):
raise errors.AnsibleFilterError("failed, expects to filter on a string")
for line in file_contents.splitlines():
user = None
diff --git a/git/parent.py b/git/parent.py
deleted file mode 100755
index 92f57df3e..000000000
--- a/git/parent.py
+++ /dev/null
@@ -1,97 +0,0 @@
-#!/usr/bin/env python
-# flake8: noqa
-# pylint: skip-file
-'''
- Script to determine if this commit has also
- been merged through the stage branch
-'''
-#
-# Usage:
-# parent_check.py <branch> <commit_id>
-#
-#
-import sys
-import subprocess
-
-def run_cli_cmd(cmd, in_stdout=None, in_stderr=None):
- '''Run a command and return its output'''
- if not in_stderr:
- proc = subprocess.Popen(cmd, bufsize=-1, stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=False)
- else:
- proc = subprocess.check_output(cmd, bufsize=-1, stdout=in_stdout, stderr=in_stderr, shell=False)
- stdout, stderr = proc.communicate()
- if proc.returncode != 0:
- return {"rc": proc.returncode, "error": stderr}
- else:
- return {"rc": proc.returncode, "result": stdout}
-
-def main():
- '''Check to ensure that the commit that is currently
- being submitted is also in the stage branch.
-
- if it is, succeed
- else, fail
- '''
- branch = 'prod'
-
- if sys.argv[1] != branch:
- sys.exit(0)
-
- # git co stg
- results = run_cli_cmd(['/usr/bin/git', 'checkout', 'stg'])
-
- # git pull latest
- results = run_cli_cmd(['/usr/bin/git', 'pull'])
-
- # setup on the <prod> branch in git
- results = run_cli_cmd(['/usr/bin/git', 'checkout', 'prod'])
-
- results = run_cli_cmd(['/usr/bin/git', 'pull'])
- # merge the passed in commit into my current <branch>
-
- commit_id = sys.argv[2]
- results = run_cli_cmd(['/usr/bin/git', 'merge', commit_id])
-
- # get the differences from stg and <branch>
- results = run_cli_cmd(['/usr/bin/git', 'rev-list', '--left-right', 'stg...prod'])
-
- # exit here with error code if the result coming back is an error
- if results['rc'] != 0:
- print results['error']
- sys.exit(results['rc'])
-
- count = 0
- # Each 'result' is a commit
- # Walk through each commit and see if it is in stg
- for commit in results['result'].split('\n'):
-
- # continue if it is already in stg
- if not commit or commit.startswith('<'):
- continue
-
- # remove the first char '>'
- commit = commit[1:]
-
- # check if any remote branches contain $commit
- results = run_cli_cmd(['/usr/bin/git', 'branch', '-q', '-r', '--contains', commit], in_stderr=None)
-
- # if this comes back empty, nothing contains it, we can skip it as
- # we have probably created the merge commit here locally
- if results['rc'] == 0 and len(results['result']) == 0:
- continue
-
- # The results generally contain origin/pr/246/merge and origin/pr/246/head
- # this is the pull request which would contain the commit in question.
- #
- # If the results do not contain origin/stg then stage does not contain
- # the commit in question. Therefore we need to alert!
- if 'origin/stg' not in results['result']:
- print "\nFAILED: (These commits are not in stage.)\n"
- print "\t%s" % commit
- count += 1
-
- # Exit with count of commits in #{branch} but not stg
- sys.exit(count)
-
-if __name__ == '__main__':
- main()
diff --git a/git/pylint.sh b/git/pylint.sh
deleted file mode 100755
index 3acf9cc8c..000000000
--- a/git/pylint.sh
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/usr/bin/env bash
-set -eu
-
-ANSIBLE_UPSTREAM_FILES=(
- 'inventory/aws/hosts/ec2.py'
- 'inventory/gce/hosts/gce.py'
- 'inventory/libvirt/hosts/libvirt_generic.py'
- 'inventory/openstack/hosts/nova.py'
- 'lookup_plugins/sequence.py'
- 'playbooks/gce/openshift-cluster/library/gce.py'
- )
-
-OLDREV=$1
-NEWREV=$2
-#TRG_BRANCH=$3
-
-PYTHON=$(which python)
-
-set +e
-PY_DIFF=$(/usr/bin/git diff --name-only $OLDREV $NEWREV --diff-filter=ACM | grep ".py$")
-set -e
-
-FILES_TO_TEST=""
-
-for PY_FILE in $PY_DIFF; do
- IGNORE_FILE=false
- for UPSTREAM_FILE in "${ANSIBLE_UPSTREAM_FILES[@]}"; do
- if [ "${PY_FILE}" == "${UPSTREAM_FILE}" ]; then
- IGNORE_FILE=true
- break
- fi
- done
-
- if [ "${IGNORE_FILE}" == true ]; then
- echo "Skipping file ${PY_FILE} as an upstream Ansible file..."
- continue
- fi
-
- if [ -e "${PY_FILE}" ]; then
- FILES_TO_TEST="${FILES_TO_TEST} ${PY_FILE}"
- fi
-done
-
-export PYTHONPATH=${WORKSPACE}/utils/src/:${WORKSPACE}/utils/test/
-
-if [ "${FILES_TO_TEST}" != "" ]; then
- echo "Testing files: ${FILES_TO_TEST}"
- exec ${PYTHON} -m pylint --rcfile ${WORKSPACE}/git/.pylintrc ${FILES_TO_TEST}
-else
- exit 0
-fi
diff --git a/git/yaml_validation.py b/git/yaml_validation.py
deleted file mode 100755
index 6672876bb..000000000
--- a/git/yaml_validation.py
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/usr/bin/env python
-# flake8: noqa
-#
-# python yaml validator for a git commit
-#
-'''
-python yaml validator for a git commit
-'''
-import shutil
-import sys
-import os
-import tempfile
-import subprocess
-import yaml
-
-def get_changes(oldrev, newrev, tempdir):
- '''Get a list of git changes from oldrev to newrev'''
- proc = subprocess.Popen(['/usr/bin/git', 'diff', '--name-only', oldrev,
- newrev, '--diff-filter=ACM'], stdout=subprocess.PIPE)
- stdout, _ = proc.communicate()
- files = stdout.split('\n')
-
- # No file changes
- if not files:
- return []
-
- cmd = '/usr/bin/git archive %s %s | /bin/tar x -C %s' % (newrev, " ".join(files), tempdir)
- proc = subprocess.Popen(cmd, shell=True)
- _, _ = proc.communicate()
-
- rfiles = []
- for dirpath, _, fnames in os.walk(tempdir):
- for fname in fnames:
- rfiles.append(os.path.join(dirpath, fname))
-
- return rfiles
-
-def main():
- '''
- Perform yaml validation
- '''
- results = []
- try:
- tmpdir = tempfile.mkdtemp(prefix='jenkins-git-')
- old, new, _ = sys.argv[1:]
-
- for file_mod in get_changes(old, new, tmpdir):
-
- print "+++++++ Received: %s" % file_mod
-
- # if the file extensions is not yml or yaml, move along.
- if not file_mod.endswith('.yml') and not file_mod.endswith('.yaml'):
- continue
-
- # We use symlinks in our repositories, ignore them.
- if os.path.islink(file_mod):
- continue
-
- try:
- yaml.load(open(file_mod))
- results.append(True)
-
- except yaml.scanner.ScannerError as yerr:
- print yerr
- results.append(False)
- finally:
- shutil.rmtree(tmpdir)
-
- if not all(results):
- sys.exit(1)
-
-if __name__ == "__main__":
- main()
diff --git a/inventory/README.md b/inventory/README.md
index b8edfcbb0..b61bfff18 100644
--- a/inventory/README.md
+++ b/inventory/README.md
@@ -5,5 +5,5 @@ You can install OpenShift on:
* [Amazon Web Services](aws/hosts/)
* [BYO](byo/) (Bring your own), use this inventory config file to install OpenShift on your bare metal servers
* [GCE](gce/) (Google Compute Engine)
-* [libvirt](libviert/hosts/)
+* [libvirt](libvirt/hosts/)
* [OpenStack](openstack/hosts/)
diff --git a/inventory/aws/hosts/ec2.ini b/inventory/aws/hosts/ec2.ini
index 5ee51c84f..64c097d47 100644
--- a/inventory/aws/hosts/ec2.ini
+++ b/inventory/aws/hosts/ec2.ini
@@ -29,17 +29,32 @@ regions_exclude = us-gov-west-1,cn-north-1
# in the event of a collision.
destination_variable = public_dns_name
+# This allows you to override the inventory_name with an ec2 variable, instead
+# of using the destination_variable above. Addressing (aka ansible_ssh_host)
+# will still use destination_variable. Tags should be written as 'tag_TAGNAME'.
+hostname_variable = tag_Name
+
# For server inside a VPC, using DNS names may not make sense. When an instance
# has 'subnet_id' set, this variable is used. If the subnet is public, setting
# this to 'ip_address' will return the public IP address. For instances in a
# private subnet, this should be set to 'private_ip_address', and Ansible must
# be run from within EC2. The key of an EC2 tag may optionally be used; however
# the boto instance variables hold precedence in the event of a collision.
-# WARNING: - instances that are in the private vpc, _without_ public ip address
+# WARNING: - instances that are in the private vpc, _without_ public ip address
# will not be listed in the inventory until You set:
-# vpc_destination_variable = 'private_ip_address'
+# vpc_destination_variable = private_ip_address
vpc_destination_variable = ip_address
+# The following two settings allow flexible ansible host naming based on a
+# python format string and a comma-separated list of ec2 tags. Note that:
+#
+# 1) If the tags referenced are not present for some instances, empty strings
+# will be substituted in the format string.
+# 2) This overrides both destination_variable and vpc_destination_variable.
+#
+#destination_format = {0}.{1}.example.com
+#destination_format_tags = Name,environment
+
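The substitution these settings drive can be sketched in plain Python (hypothetical tag values):

    destination_format = '{0}.{1}.example.com'
    tags = {'Name': 'master01', 'environment': 'prod'}
    # Missing tags become empty strings, per note (1) above.
    print(destination_format.format(*[tags.get(t, '') for t in ['Name', 'environment']]))
    # master01.prod.example.com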
# To tag instances on EC2 with the resource records that point to them from
# Route53, uncomment and set 'route53' to True.
route53 = False
@@ -67,6 +82,9 @@ all_instances = False
# 'all_rds_instances' to True return all RDS instances regardless of state.
all_rds_instances = False
+# Include RDS cluster information (Aurora etc.)
+include_rds_clusters = False
+
# By default, only ElastiCache clusters and nodes in the 'available' state
# are returned. Set 'all_elasticache_clusters' and/or 'all_elastic_nodes'
# to True return all ElastiCache clusters and nodes, regardless of state.
@@ -91,19 +109,16 @@ cache_path = ~/.ansible/tmp
# To disable the cache, set this value to 0
cache_max_age = 300
-# These two settings allow flexible ansible host naming based on a format
-# string and a comma-separated list of ec2 tags. The tags used must be
-# present for all instances, or the code will fail. This overrides both
-# destination_variable and vpc_destination_variable.
-# destination_format = {0}.{1}.rhcloud.com
-# destination_format_tags = Name,environment
-
# Organize groups into a nested/hierarchy instead of a flat namespace.
nested_groups = False
# Replace - tags when creating groups to avoid issues with ansible
replace_dash_in_groups = False
+# If set to true, any tag of the form "a,b,c" is expanded into a list
+# and the results are used to create additional tag_* inventory groups.
+expand_csv_tags = False
+
# The EC2 inventory output can become very large. To manage its size,
# configure which groups should be created.
group_by_instance_id = True
@@ -147,9 +162,28 @@ group_by_elasticache_replication_group = True
# You can use wildcards in filter values also. Below will list instances which
# tag Name value matches webservers1*
-# (ex. webservers15, webservers1a, webservers123 etc)
+# (ex. webservers15, webservers1a, webservers123 etc)
# instance_filters = tag:Name=webservers1*
# A boto configuration profile may be used to separate out credentials
# see http://boto.readthedocs.org/en/latest/boto_config_tut.html
# boto_profile = some-boto-profile-name
+
+
+[credentials]
+
+# The AWS credentials can optionally be specified here. Credentials specified
+# here are ignored if the environment variable AWS_ACCESS_KEY_ID or
+# AWS_PROFILE is set, or if the boto_profile property above is set.
+#
+# Supplying AWS credentials here is not recommended, as it introduces
+# non-trivial security concerns. When going down this route, please make sure
+# to set access permissions for this file correctly, e.g. handle it the same
+# way as you would a private SSH key.
+#
+# Unlike the boto and AWS configure files, this section does not support
+# profiles.
+#
+# aws_access_key_id = AXXXXXXXXXXXXXX
+# aws_secret_access_key = XXXXXXXXXXXXXXXXXXX
+# aws_security_token = XXXXXXXXXXXXXXXXXXXXXXXXXXXX
diff --git a/inventory/aws/hosts/ec2.py b/inventory/aws/hosts/ec2.py
index 7dfcd7889..b71458a29 100755
--- a/inventory/aws/hosts/ec2.py
+++ b/inventory/aws/hosts/ec2.py
@@ -38,6 +38,7 @@ When run against a specific host, this script returns the following variables:
- ec2_attachTime
- ec2_attachment
- ec2_attachmentId
+ - ec2_block_devices
- ec2_client_token
- ec2_deleteOnTermination
- ec2_description
@@ -132,6 +133,15 @@ from boto import elasticache
from boto import route53
import six
+from ansible.module_utils import ec2 as ec2_utils
+
+HAS_BOTO3 = False
+try:
+ import boto3
+ HAS_BOTO3 = True
+except ImportError:
+ pass
+
from six.moves import configparser
from collections import defaultdict
@@ -142,6 +152,7 @@ except ImportError:
class Ec2Inventory(object):
+
def _empty_inventory(self):
return {"_meta" : {"hostvars" : {}}}
@@ -158,6 +169,9 @@ class Ec2Inventory(object):
# Boto profile to use (if any)
self.boto_profile = None
+ # AWS credentials.
+ self.credentials = {}
+
# Read settings and parse CLI arguments
self.parse_cli_args()
self.read_settings()
@@ -225,7 +239,7 @@ class Ec2Inventory(object):
configRegions_exclude = config.get('ec2', 'regions_exclude')
if (configRegions == 'all'):
if self.eucalyptus_host:
- self.regions.append(boto.connect_euca(host=self.eucalyptus_host).region.name)
+ self.regions.append(boto.connect_euca(host=self.eucalyptus_host, **self.credentials).region.name)
else:
for regionInfo in ec2.regions():
if regionInfo.name not in configRegions_exclude:
@@ -237,6 +251,11 @@ class Ec2Inventory(object):
self.destination_variable = config.get('ec2', 'destination_variable')
self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable')
+ if config.has_option('ec2', 'hostname_variable'):
+ self.hostname_variable = config.get('ec2', 'hostname_variable')
+ else:
+ self.hostname_variable = None
+
if config.has_option('ec2', 'destination_format') and \
config.has_option('ec2', 'destination_format_tags'):
self.destination_format = config.get('ec2', 'destination_format')
@@ -257,6 +276,12 @@ class Ec2Inventory(object):
if config.has_option('ec2', 'rds'):
self.rds_enabled = config.getboolean('ec2', 'rds')
+ # Include RDS cluster instances?
+ if config.has_option('ec2', 'include_rds_clusters'):
+ self.include_rds_clusters = config.getboolean('ec2', 'include_rds_clusters')
+ else:
+ self.include_rds_clusters = False
+
# Include ElastiCache instances?
self.elasticache_enabled = True
if config.has_option('ec2', 'elasticache'):
@@ -319,6 +344,29 @@ class Ec2Inventory(object):
if config.has_option('ec2', 'boto_profile') and not self.boto_profile:
self.boto_profile = config.get('ec2', 'boto_profile')
+ # AWS credentials (prefer environment variables)
+ if not (self.boto_profile or os.environ.get('AWS_ACCESS_KEY_ID') or
+ os.environ.get('AWS_PROFILE')):
+ if config.has_option('credentials', 'aws_access_key_id'):
+ aws_access_key_id = config.get('credentials', 'aws_access_key_id')
+ else:
+ aws_access_key_id = None
+ if config.has_option('credentials', 'aws_secret_access_key'):
+ aws_secret_access_key = config.get('credentials', 'aws_secret_access_key')
+ else:
+ aws_secret_access_key = None
+ if config.has_option('credentials', 'aws_security_token'):
+ aws_security_token = config.get('credentials', 'aws_security_token')
+ else:
+ aws_security_token = None
+ if aws_access_key_id:
+ self.credentials = {
+ 'aws_access_key_id': aws_access_key_id,
+ 'aws_secret_access_key': aws_secret_access_key
+ }
+ if aws_security_token:
+ self.credentials['security_token'] = aws_security_token
+
# Cache related
cache_dir = os.path.expanduser(config.get('ec2', 'cache_path'))
if self.boto_profile:
@@ -326,10 +374,22 @@ class Ec2Inventory(object):
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
- self.cache_path_cache = cache_dir + "/ansible-ec2.cache"
- self.cache_path_index = cache_dir + "/ansible-ec2.index"
+ cache_name = 'ansible-ec2'
+ aws_profile = lambda: (self.boto_profile or
+ os.environ.get('AWS_PROFILE') or
+ os.environ.get('AWS_ACCESS_KEY_ID') or
+ self.credentials.get('aws_access_key_id', None))
+ if aws_profile():
+ cache_name = '%s-%s' % (cache_name, aws_profile())
+ self.cache_path_cache = cache_dir + "/%s.cache" % cache_name
+ self.cache_path_index = cache_dir + "/%s.index" % cache_name
self.cache_max_age = config.getint('ec2', 'cache_max_age')
+ if config.has_option('ec2', 'expand_csv_tags'):
+ self.expand_csv_tags = config.getboolean('ec2', 'expand_csv_tags')
+ else:
+ self.expand_csv_tags = False
+
# Configure nested groups instead of flat namespace.
if config.has_option('ec2', 'nested_groups'):
self.nested_groups = config.getboolean('ec2', 'nested_groups')
@@ -391,7 +451,10 @@ class Ec2Inventory(object):
# Instance filters (see boto and EC2 API docs). Ignore invalid filters.
self.ec2_instance_filters = defaultdict(list)
if config.has_option('ec2', 'instance_filters'):
- for instance_filter in config.get('ec2', 'instance_filters', '').split(','):
+
+ filters = [f for f in config.get('ec2', 'instance_filters').split(',') if f]
+
+ for instance_filter in filters:
instance_filter = instance_filter.strip()
if not instance_filter or '=' not in instance_filter:
continue
@@ -410,7 +473,7 @@ class Ec2Inventory(object):
help='Get all the variables about a specific instance')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)')
- parser.add_argument('--boto-profile', action='store',
+ parser.add_argument('--profile', '--boto-profile', action='store', dest='boto_profile',
help='Use boto profile for connections to EC2')
self.args = parser.parse_args()
@@ -428,6 +491,8 @@ class Ec2Inventory(object):
if self.elasticache_enabled:
self.get_elasticache_clusters_by_region(region)
self.get_elasticache_replication_groups_by_region(region)
+ if self.include_rds_clusters:
+ self.include_rds_clusters_by_region(region)
self.write_to_cache(self.inventory, self.cache_path_cache)
self.write_to_cache(self.index, self.cache_path_index)
@@ -435,7 +500,7 @@ class Ec2Inventory(object):
def connect(self, region):
''' create connection to api server'''
if self.eucalyptus:
- conn = boto.connect_euca(host=self.eucalyptus_host)
+ conn = boto.connect_euca(host=self.eucalyptus_host, **self.credentials)
conn.APIVersion = '2010-08-31'
else:
conn = self.connect_to_aws(ec2, region)
@@ -449,7 +514,7 @@ class Ec2Inventory(object):
return connect_args
def connect_to_aws(self, module, region):
- connect_args = {}
+ connect_args = self.credentials
# only pass the profile name if it's set (as it is not supported by older boto versions)
if self.boto_profile:
@@ -475,8 +540,25 @@ class Ec2Inventory(object):
else:
reservations = conn.get_all_instances()
+ # Pull the tags back in a second step
+ # AWS are on record as saying that the tags fetched in the first `get_all_instances` request are not
+ # reliable and may be missing, and the only way to guarantee they are there is by calling `get_all_tags`
+ instance_ids = []
+ for reservation in reservations:
+ instance_ids.extend([instance.id for instance in reservation.instances])
+
+ max_filter_value = 199
+ tags = []
+ for i in range(0, len(instance_ids), max_filter_value):
+ tags.extend(conn.get_all_tags(filters={'resource-type': 'instance', 'resource-id': instance_ids[i:i+max_filter_value]}))
+
+ tags_by_instance_id = defaultdict(dict)
+ for tag in tags:
+ tags_by_instance_id[tag.res_id][tag.name] = tag.value
+
for reservation in reservations:
for instance in reservation.instances:
+ instance.tags = tags_by_instance_id[instance.id]
self.add_instance(instance, region)
except boto.exception.BotoServerError as e:
@@ -494,9 +576,14 @@ class Ec2Inventory(object):
try:
conn = self.connect_to_aws(rds, region)
if conn:
- instances = conn.get_all_dbinstances()
- for instance in instances:
- self.add_rds_instance(instance, region)
+ marker = None
+ while True:
+ instances = conn.get_all_dbinstances(marker=marker)
+ marker = instances.marker
+ for instance in instances:
+ self.add_rds_instance(instance, region)
+ if not marker:
+ break
except boto.exception.BotoServerError as e:
error = e.reason
@@ -506,6 +593,65 @@ class Ec2Inventory(object):
error = "Looks like AWS RDS is down:\n%s" % e.message
self.fail_with_error(error, 'getting RDS instances')
+ def include_rds_clusters_by_region(self, region):
+ if not HAS_BOTO3:
+ self.fail_with_error("Working with RDS clusters requires boto3 - please install boto3 and try again",
+ "getting RDS clusters")
+
+ client = ec2_utils.boto3_inventory_conn('client', 'rds', region, **self.credentials)
+
+ marker, clusters = '', []
+ while marker is not None:
+ resp = client.describe_db_clusters(Marker=marker)
+ clusters.extend(resp["DBClusters"])
+ marker = resp.get('Marker', None)
+
+ account_id = boto.connect_iam().get_user().arn.split(':')[4]
+ c_dict = {}
+ for c in clusters:
+ # remove these datetime objects as there is no serialisation to json
+ # currently in place and we don't need the data yet
+ if 'EarliestRestorableTime' in c:
+ del c['EarliestRestorableTime']
+ if 'LatestRestorableTime' in c:
+ del c['LatestRestorableTime']
+
+ if self.ec2_instance_filters == {}:
+ matches_filter = True
+ else:
+ matches_filter = False
+
+ try:
+ # arn:aws:rds:<region>:<account number>:<resourcetype>:<name>
+ tags = client.list_tags_for_resource(
+ ResourceName='arn:aws:rds:' + region + ':' + account_id + ':cluster:' + c['DBClusterIdentifier'])
+ c['Tags'] = tags['TagList']
+
+ if self.ec2_instance_filters:
+ for filter_key, filter_values in self.ec2_instance_filters.items():
+ # get AWS tag key e.g. tag:env will be 'env'
+ tag_name = filter_key.split(":", 1)[1]
+ # Filter values is a list (if you put multiple values for the same tag name)
+ matches_filter = any(d['Key'] == tag_name and d['Value'] in filter_values for d in c['Tags'])
+
+ if matches_filter:
+ # it matches a filter, so stop looking for further matches
+ break
+
+ except Exception as e:
+ if e.message.find('DBInstanceNotFound') >= 0:
+ # AWS RDS bug (2016-01-06) means deletion does not fully complete and can leave an 'empty' cluster.
+ # Ignore errors when trying to find tags for these
+ pass
+
+ # ignore empty clusters caused by AWS bug
+ if len(c['DBClusterMembers']) == 0:
+ continue
+ elif matches_filter:
+ c_dict[c['DBClusterIdentifier']] = c
+
+ self.inventory['db_clusters'] = c_dict
+
def get_elasticache_clusters_by_region(self, region):
''' Makes an AWS API call to the list of ElastiCache clusters (with
nodes' info) in a particular region.'''
@@ -514,7 +660,7 @@ class Ec2Inventory(object):
# that's why we need to call describe directly (it would be called by
# the shorthand method anyway...)
try:
- conn = elasticache.connect_to_region(region)
+ conn = self.connect_to_aws(elasticache, region)
if conn:
# show_cache_node_info = True
# because we also want nodes' information
@@ -531,7 +677,7 @@ class Ec2Inventory(object):
try:
# Boto also doesn't provide wrapper classes to CacheClusters or
- # CacheNodes. Because of that wo can't make use of the get_list
+ # CacheNodes. Because of that we can't make use of the get_list
# method in the AWSQueryConnection. Let's do the work manually
clusters = response['DescribeCacheClustersResponse']['DescribeCacheClustersResult']['CacheClusters']
@@ -550,7 +696,7 @@ class Ec2Inventory(object):
# that's why we need to call describe directly (it would be called by
# the shorthand method anyway...)
try:
- conn = elasticache.connect_to_region(region)
+ conn = self.connect_to_aws(elasticache, region)
if conn:
response = conn.describe_replication_groups()
@@ -565,7 +711,7 @@ class Ec2Inventory(object):
try:
# Boto also doesn't provide wrapper classes to ReplicationGroups
- # Because of that wo can't make use of the get_list method in the
+ # Because of that we can't make use of the get_list method in the
# AWSQueryConnection. Let's do the work manually
replication_groups = response['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult']['ReplicationGroups']
@@ -619,7 +765,7 @@ class Ec2Inventory(object):
# Select the best destination address
if self.destination_format and self.destination_format_tags:
- dest = self.destination_format.format(*[ getattr(instance, 'tags').get(tag, 'nil') for tag in self.destination_format_tags ])
+ dest = self.destination_format.format(*[ getattr(instance, 'tags').get(tag, '') for tag in self.destination_format_tags ])
elif instance.subnet_id:
dest = getattr(instance, self.vpc_destination_variable, None)
if dest is None:
@@ -633,32 +779,46 @@ class Ec2Inventory(object):
# Skip instances we cannot address (e.g. private VPC subnet)
return
+ # Set the inventory name
+ hostname = None
+ if self.hostname_variable:
+ if self.hostname_variable.startswith('tag_'):
+ hostname = instance.tags.get(self.hostname_variable[4:], None)
+ else:
+ hostname = getattr(instance, self.hostname_variable)
+
+ # If we can't get a nice hostname, use the destination address
+ if not hostname:
+ hostname = dest
+ else:
+ hostname = self.to_safe(hostname).lower()
+
# if we only want to include hosts that match a pattern, skip those that don't
- if self.pattern_include and not self.pattern_include.match(dest):
+ if self.pattern_include and not self.pattern_include.match(hostname):
return
# if we need to exclude hosts that match a pattern, skip those
- if self.pattern_exclude and self.pattern_exclude.match(dest):
+ if self.pattern_exclude and self.pattern_exclude.match(hostname):
return
# Add to index
- self.index[dest] = [region, instance.id]
+ self.index[hostname] = [region, instance.id]
# Inventory: Group by instance ID (always a group of 1)
if self.group_by_instance_id:
- self.inventory[instance.id] = [dest]
+ self.inventory[instance.id] = [hostname]
if self.nested_groups:
self.push_group(self.inventory, 'instances', instance.id)
# Inventory: Group by region
if self.group_by_region:
- self.push(self.inventory, region, dest)
+ self.push(self.inventory, region, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'regions', region)
# Inventory: Group by availability zone
if self.group_by_availability_zone:
- self.push(self.inventory, instance.placement, dest)
+ self.push(self.inventory, instance.placement, hostname)
if self.nested_groups:
if self.group_by_region:
self.push_group(self.inventory, region, instance.placement)
@@ -667,28 +827,28 @@ class Ec2Inventory(object):
# Inventory: Group by Amazon Machine Image (AMI) ID
if self.group_by_ami_id:
ami_id = self.to_safe(instance.image_id)
- self.push(self.inventory, ami_id, dest)
+ self.push(self.inventory, ami_id, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'images', ami_id)
# Inventory: Group by instance type
if self.group_by_instance_type:
type_name = self.to_safe('type_' + instance.instance_type)
- self.push(self.inventory, type_name, dest)
+ self.push(self.inventory, type_name, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'types', type_name)
# Inventory: Group by key pair
if self.group_by_key_pair and instance.key_name:
key_name = self.to_safe('key_' + instance.key_name)
- self.push(self.inventory, key_name, dest)
+ self.push(self.inventory, key_name, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'keys', key_name)
# Inventory: Group by VPC
if self.group_by_vpc_id and instance.vpc_id:
vpc_id_name = self.to_safe('vpc_id_' + instance.vpc_id)
- self.push(self.inventory, vpc_id_name, dest)
+ self.push(self.inventory, vpc_id_name, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'vpcs', vpc_id_name)
@@ -697,7 +857,7 @@ class Ec2Inventory(object):
try:
for group in instance.groups:
key = self.to_safe("security_group_" + group.name)
- self.push(self.inventory, key, dest)
+ self.push(self.inventory, key, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'security_groups', key)
except AttributeError:
@@ -707,34 +867,41 @@ class Ec2Inventory(object):
# Inventory: Group by tag keys
if self.group_by_tag_keys:
for k, v in instance.tags.items():
- if v:
- key = self.to_safe("tag_" + k + "=" + v)
+ if self.expand_csv_tags and v and ',' in v:
+ values = map(lambda x: x.strip(), v.split(','))
else:
- key = self.to_safe("tag_" + k)
- self.push(self.inventory, key, dest)
- if self.nested_groups:
- self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k))
+ values = [v]
+
+ for v in values:
if v:
- self.push_group(self.inventory, self.to_safe("tag_" + k), key)
+ key = self.to_safe("tag_" + k + "=" + v)
+ else:
+ key = self.to_safe("tag_" + k)
+ self.push(self.inventory, key, hostname)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k))
+ if v:
+ self.push_group(self.inventory, self.to_safe("tag_" + k), key)
# Inventory: Group by Route53 domain names if enabled
if self.route53_enabled and self.group_by_route53_names:
route53_names = self.get_instance_route53_names(instance)
for name in route53_names:
- self.push(self.inventory, name, dest)
+ self.push(self.inventory, name, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'route53', name)
# Global Tag: instances without tags
if self.group_by_tag_none and len(instance.tags) == 0:
- self.push(self.inventory, 'tag_none', dest)
+ self.push(self.inventory, 'tag_none', hostname)
if self.nested_groups:
self.push_group(self.inventory, 'tags', 'tag_none')
# Global Tag: tag all EC2 instances
- self.push(self.inventory, 'ec2', dest)
+ self.push(self.inventory, 'ec2', hostname)
- self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance)
+ self.inventory["_meta"]["hostvars"][hostname] = self.get_host_info_dict_from_instance(instance)
+ self.inventory["_meta"]["hostvars"][hostname]['ansible_ssh_host'] = dest
def add_rds_instance(self, instance, region):
@@ -752,24 +919,38 @@ class Ec2Inventory(object):
# Skip instances we cannot address (e.g. private VPC subnet)
return
+ # Set the inventory name
+ hostname = None
+ if self.hostname_variable:
+ if self.hostname_variable.startswith('tag_'):
+ hostname = instance.tags.get(self.hostname_variable[4:], None)
+ else:
+ hostname = getattr(instance, self.hostname_variable)
+
+ # If we can't get a nice hostname, use the destination address
+ if not hostname:
+ hostname = dest
+
+ hostname = self.to_safe(hostname).lower()
+
# Add to index
- self.index[dest] = [region, instance.id]
+ self.index[hostname] = [region, instance.id]
# Inventory: Group by instance ID (always a group of 1)
if self.group_by_instance_id:
- self.inventory[instance.id] = [dest]
+ self.inventory[instance.id] = [hostname]
if self.nested_groups:
self.push_group(self.inventory, 'instances', instance.id)
# Inventory: Group by region
if self.group_by_region:
- self.push(self.inventory, region, dest)
+ self.push(self.inventory, region, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'regions', region)
# Inventory: Group by availability zone
if self.group_by_availability_zone:
- self.push(self.inventory, instance.availability_zone, dest)
+ self.push(self.inventory, instance.availability_zone, hostname)
if self.nested_groups:
if self.group_by_region:
self.push_group(self.inventory, region, instance.availability_zone)
@@ -778,14 +959,14 @@ class Ec2Inventory(object):
# Inventory: Group by instance type
if self.group_by_instance_type:
type_name = self.to_safe('type_' + instance.instance_class)
- self.push(self.inventory, type_name, dest)
+ self.push(self.inventory, type_name, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'types', type_name)
# Inventory: Group by VPC
if self.group_by_vpc_id and instance.subnet_group and instance.subnet_group.vpc_id:
vpc_id_name = self.to_safe('vpc_id_' + instance.subnet_group.vpc_id)
- self.push(self.inventory, vpc_id_name, dest)
+ self.push(self.inventory, vpc_id_name, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'vpcs', vpc_id_name)
@@ -794,7 +975,7 @@ class Ec2Inventory(object):
try:
if instance.security_group:
key = self.to_safe("security_group_" + instance.security_group.name)
- self.push(self.inventory, key, dest)
+ self.push(self.inventory, key, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'security_groups', key)
@@ -805,20 +986,21 @@ class Ec2Inventory(object):
# Inventory: Group by engine
if self.group_by_rds_engine:
- self.push(self.inventory, self.to_safe("rds_" + instance.engine), dest)
+ self.push(self.inventory, self.to_safe("rds_" + instance.engine), hostname)
if self.nested_groups:
self.push_group(self.inventory, 'rds_engines', self.to_safe("rds_" + instance.engine))
# Inventory: Group by parameter group
if self.group_by_rds_parameter_group:
- self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), dest)
+ self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), hostname)
if self.nested_groups:
self.push_group(self.inventory, 'rds_parameter_groups', self.to_safe("rds_parameter_group_" + instance.parameter_group.name))
# Global Tag: all RDS instances
- self.push(self.inventory, 'rds', dest)
+ self.push(self.inventory, 'rds', hostname)
- self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance)
+ self.inventory["_meta"]["hostvars"][hostname] = self.get_host_info_dict_from_instance(instance)
+ self.inventory["_meta"]["hostvars"][hostname]['ansible_ssh_host'] = dest
def add_elasticache_cluster(self, cluster, region):
''' Adds an ElastiCache cluster to the inventory and index, as long as
@@ -1131,6 +1313,8 @@ class Ec2Inventory(object):
instance_vars['ec2_placement'] = value.zone
elif key == 'ec2_tags':
for k, v in value.items():
+ if self.expand_csv_tags and ',' in v:
+ v = list(map(lambda x: x.strip(), v.split(',')))
key = self.to_safe('ec2_tag_' + k)
instance_vars[key] = v
elif key == 'ec2_groups':
@@ -1141,6 +1325,10 @@ class Ec2Inventory(object):
group_names.append(group.name)
instance_vars["ec2_security_group_ids"] = ','.join([str(i) for i in group_ids])
instance_vars["ec2_security_group_names"] = ','.join([str(i) for i in group_names])
+ elif key == 'ec2_block_device_mapping':
+ instance_vars["ec2_block_devices"] = {}
+ for k, v in value.items():
+ instance_vars["ec2_block_devices"][ os.path.basename(k) ] = v.volume_id
else:
pass
# TODO Product codes if someone finds them useful
@@ -1321,4 +1509,3 @@ class Ec2Inventory(object):
# Run the script
Ec2Inventory()
-
diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example
index 9caf5408f..0a1b8c5c4 100644
--- a/inventory/byo/hosts.origin.example
+++ b/inventory/byo/hosts.origin.example
@@ -84,11 +84,30 @@ openshift_release=v1.4
# Specify exact version of Docker to configure or upgrade to.
# Downgrades are not supported and will error out. Be careful when upgrading docker from < 1.10 to > 1.10.
-# docker_version="1.10.3"
+# docker_version="1.12.1"
# Skip upgrading Docker during an OpenShift upgrade, leaves the current Docker version alone.
# docker_upgrade=False
+
+# Upgrade Hooks
+#
+# Hooks are available to run custom tasks at various points during a cluster
+# upgrade. Each hook should point to a file with Ansible tasks defined. Absolute
+# paths are suggested; otherwise the path will be treated as relative to the file
+# where the hook is actually used.
+#
+# Tasks to run before each master is upgraded.
+# openshift_master_upgrade_pre_hook=/usr/share/custom/pre_master.yml
+#
+# Tasks to run to upgrade the master. These tasks run after the main openshift-ansible
+# upgrade steps, but before we restart system/services.
+# openshift_master_upgrade_hook=/usr/share/custom/master.yml
+#
+# Tasks to run after each master is upgraded and system/services have been restarted.
+# openshift_master_upgrade_post_hook=/usr/share/custom/post_master.yml
+
+
# Alternate image format string, useful if you've got your own registry mirror
#oreg_url=example.com/openshift3/ose-${component}:${version}
# If oreg_url points to a registry other than registry.access.redhat.com we can
@@ -606,17 +625,39 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_generate_no_proxy_hosts=True
#
# These options configure the BuildDefaults admission controller which injects
-# environment variables into Builds. These values will default to the global proxy
-# config values. You only need to set these if they differ from the global settings
-# above. See BuildDefaults
-# documentation at https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html
+# configuration into Builds. Proxy related values will default to the global proxy
+# config values. You only need to set these if they differ from the global proxy settings.
+# See BuildDefaults documentation at
+# https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html
#openshift_builddefaults_http_proxy=http://USER:PASSWORD@HOST:PORT
#openshift_builddefaults_https_proxy=https://USER:PASSWORD@HOST:PORT
-#openshift_builddefaults_no_proxy=build_defaults
+#openshift_builddefaults_no_proxy=mycorp.com
#openshift_builddefaults_git_http_proxy=http://USER:PASSWORD@HOST:PORT
#openshift_builddefaults_git_https_proxy=https://USER:PASSWORD@HOST:PORT
-# Or you may optionally define your own serialized as json
-#openshift_builddefaults_json='{"BuildDefaults":{"configuration":{"kind":"BuildDefaultsConfig","apiVersion":"v1","gitHTTPSProxy":"http://proxy.example.com.redhat.com:3128","gitHTTPProxy":"http://proxy.example.com.redhat.com:3128","env":[{"name":"HTTP_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"HTTPS_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"NO_PROXY","value":"ose3-master.example.com"}]}}'
+#openshift_builddefaults_git_no_proxy=mycorp.com
+#openshift_builddefaults_image_labels=[{'name':'imagelabelname1','value':'imagelabelvalue1'}]
+#openshift_builddefaults_nodeselectors={'nodelabel1':'nodelabelvalue1'}
+#openshift_builddefaults_annotations={'annotationkey1':'annotationvalue1'}
+#openshift_builddefaults_resources_requests_cpu=100m
+#openshift_builddefaults_resources_requests_memory=256m
+#openshift_builddefaults_resources_limits_cpu=1000m
+#openshift_builddefaults_resources_limits_memory=512m
+
+# Or you may optionally define your own build defaults configuration serialized as json
+#openshift_builddefaults_json='{"BuildDefaults":{"configuration":{"apiVersion":"v1","env":[{"name":"HTTP_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"NO_PROXY","value":"ose3-master.example.com"}],"gitHTTPProxy":"http://proxy.example.com:3128","gitNoProxy":"ose3-master.example.com","kind":"BuildDefaultsConfig"}}}'
+
+# These options configure the BuildOverrides admission controller which injects
+# configuration into Builds.
+# See BuildOverrides documentation at
+# https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html
+#openshift_buildoverrides_force_pull=true
+#openshift_buildoverrides_image_labels=[{'name':'imagelabelname1','value':'imagelabelvalue1'}]
+#openshift_buildoverrides_nodeselectors={'nodelabel1':'nodelabelvalue1'}
+#openshift_buildoverrides_annotations={'annotationkey1':'annotationvalue1'}
+
+# Or you may optionally define your own build overrides configuration serialized as json
+#openshift_buildoverrides_json='{"BuildOverrides":{"configuration":{"apiVersion":"v1","kind":"BuildDefaultsConfig","forcePull":"true"}}}'
+
# masterConfig.volumeConfig.dynamicProvisioningEnabled, configurable as of 1.2/3.2, enabled by default
#openshift_master_dynamic_provisioning_enabled=False
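Editor's note: the openshift_builddefaults_json option above takes a single line of serialized JSON, which is tedious to write by hand. A minimal sketch (illustration only, not part of this patch) of generating that value with Python's json module; the proxy and no-proxy hosts are placeholder values:

    import json

    # Illustrative BuildDefaultsConfig; the hosts below are placeholders.
    build_defaults = {
        "BuildDefaults": {
            "configuration": {
                "apiVersion": "v1",
                "kind": "BuildDefaultsConfig",
                "gitHTTPProxy": "http://proxy.example.com:3128",
                "gitNoProxy": "master.example.com",
                "env": [
                    {"name": "HTTP_PROXY", "value": "http://proxy.example.com:3128"},
                    {"name": "NO_PROXY", "value": "master.example.com"},
                ],
            }
        }
    }

    # json.dumps yields the single-line value the inventory option expects.
    print("openshift_builddefaults_json='%s'" % json.dumps(build_defaults))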
diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example
index f1b3165f9..89b9d7e48 100644
--- a/inventory/byo/hosts.ose.example
+++ b/inventory/byo/hosts.ose.example
@@ -84,11 +84,30 @@ openshift_release=v3.4
# Specify exact version of Docker to configure or upgrade to.
# Downgrades are not supported and will error out. Be careful when upgrading docker from < 1.10 to > 1.10.
-# docker_version="1.10.3"
+# docker_version="1.12.1"
# Skip upgrading Docker during an OpenShift upgrade, leaves the current Docker version alone.
# docker_upgrade=False
+
+# Upgrade Hooks
+#
+# Hooks are available to run custom tasks at various points during a cluster
+# upgrade. Each hook should point to a file with Ansible tasks defined. Absolute
+# paths are recommended; otherwise the path is treated as relative to the file
+# where the hook is used.
+#
+# Tasks to run before each master is upgraded.
+# openshift_master_upgrade_pre_hook=/usr/share/custom/pre_master.yml
+#
+# Tasks to run to upgrade the master. These tasks run after the main openshift-ansible
+# upgrade steps, but before we restart system/services.
+# openshift_master_upgrade_hook=/usr/share/custom/master.yml
+#
+# Tasks to run after each master is upgraded and system/services have been restarted.
+# openshift_master_upgrade_post_hook=/usr/share/custom/post_master.yml
+
+
# Alternate image format string, useful if you've got your own registry mirror
#oreg_url=example.com/openshift3/ose-${component}:${version}
# If oreg_url points to a registry other than registry.access.redhat.com we can
@@ -606,17 +625,39 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_generate_no_proxy_hosts=True
#
# These options configure the BuildDefaults admission controller which injects
-# environment variables into Builds. These values will default to the global proxy
-# config values. You only need to set these if they differ from the global settings
-# above. See BuildDefaults
-# documentation at https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html
+# configuration into Builds. Proxy related values will default to the global proxy
+# config values. You only need to set these if they differ from the global proxy settings.
+# See BuildDefaults documentation at
+# https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html
#openshift_builddefaults_http_proxy=http://USER:PASSWORD@HOST:PORT
#openshift_builddefaults_https_proxy=https://USER:PASSWORD@HOST:PORT
-#openshift_builddefaults_no_proxy=build_defaults
+#openshift_builddefaults_no_proxy=mycorp.com
#openshift_builddefaults_git_http_proxy=http://USER:PASSWORD@HOST:PORT
#openshift_builddefaults_git_https_proxy=https://USER:PASSWORD@HOST:PORT
-# Or you may optionally define your own serialized as json
-#openshift_builddefaults_json='{"BuildDefaults":{"configuration":{"kind":"BuildDefaultsConfig","apiVersion":"v1","gitHTTPSProxy":"http://proxy.example.com.redhat.com:3128","gitHTTPProxy":"http://proxy.example.com.redhat.com:3128","env":[{"name":"HTTP_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"HTTPS_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"NO_PROXY","value":"ose3-master.example.com"}]}}'
+#openshift_builddefaults_git_no_proxy=mycorp.com
+#openshift_builddefaults_image_labels=[{'name':'imagelabelname1','value':'imagelabelvalue1'}]
+#openshift_builddefaults_nodeselectors={'nodelabel1':'nodelabelvalue1'}
+#openshift_builddefaults_annotations={'annotationkey1':'annotationvalue1'}
+#openshift_builddefaults_resources_requests_cpu=100m
+#openshift_builddefaults_resources_requests_memory=256m
+#openshift_builddefaults_resources_limits_cpu=1000m
+#openshift_builddefaults_resources_limits_memory=512m
+
+# Or you may optionally define your own build defaults configuration serialized as json
+#openshift_builddefaults_json='{"BuildDefaults":{"configuration":{"apiVersion":"v1","env":[{"name":"HTTP_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"NO_PROXY","value":"ose3-master.example.com"}],"gitHTTPProxy":"http://proxy.example.com:3128","gitNoProxy":"ose3-master.example.com","kind":"BuildDefaultsConfig"}}}'
+
+# These options configure the BuildOverrides admission controller which injects
+# configuration into Builds.
+# See BuildOverrides documentation at
+# https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html
+#openshift_buildoverrides_force_pull=true
+#openshift_buildoverrides_image_labels=[{'name':'imagelabelname1','value':'imagelabelvalue1'}]
+#openshift_buildoverrides_nodeselectors={'nodelabel1':'nodelabelvalue1'}
+#openshift_buildoverrides_annotations={'annotationkey1':'annotationvalue1'}
+
+# Or you may optionally define your own build overrides configuration serialized as json
+#openshift_buildoverrides_json='{"BuildOverrides":{"configuration":{"apiVersion":"v1","kind":"BuildDefaultsConfig","forcePull":"true"}}}'
+
# masterConfig.volumeConfig.dynamicProvisioningEnabled, configurable as of 1.2/3.2, enabled by default
#openshift_master_dynamic_provisioning_enabled=False
diff --git a/inventory/gce/hosts/gce.py b/inventory/gce/hosts/gce.py
index cce3c5f35..2be46a58c 100755
--- a/inventory/gce/hosts/gce.py
+++ b/inventory/gce/hosts/gce.py
@@ -70,7 +70,8 @@ Examples:
$ contrib/inventory/gce.py --host my_instance
Author: Eric Johnson <erjohnso@google.com>
-Version: 0.0.1
+Contributors: Matt Hite <mhite@hotmail.com>, Tom Melendez <supertom@google.com>
+Version: 0.0.3
'''
__requires__ = ['pycrypto>=2.6']
@@ -84,13 +85,19 @@ except ImportError:
pass
USER_AGENT_PRODUCT="Ansible-gce_inventory_plugin"
-USER_AGENT_VERSION="v1"
+USER_AGENT_VERSION="v2"
import sys
import os
import argparse
+
+from time import time
+
import ConfigParser
+import logging
+logging.getLogger('libcloud.common.google').addHandler(logging.NullHandler())
+
try:
import json
except ImportError:
@@ -101,31 +108,103 @@ try:
from libcloud.compute.providers import get_driver
_ = Provider.GCE
except:
- print("GCE inventory script requires libcloud >= 0.13")
- sys.exit(1)
+ sys.exit("GCE inventory script requires libcloud >= 0.13")
+
+
+class CloudInventoryCache(object):
+ def __init__(self, cache_name='ansible-cloud-cache', cache_path='/tmp',
+ cache_max_age=300):
+ cache_dir = os.path.expanduser(cache_path)
+ if not os.path.exists(cache_dir):
+ os.makedirs(cache_dir)
+ self.cache_path_cache = os.path.join(cache_dir, cache_name)
+
+ self.cache_max_age = cache_max_age
+
+ def is_valid(self, max_age=None):
+        ''' Determines whether the cache file is still valid or has expired. '''
+
+ if max_age is None:
+ max_age = self.cache_max_age
+
+ if os.path.isfile(self.cache_path_cache):
+ mod_time = os.path.getmtime(self.cache_path_cache)
+ current_time = time()
+ if (mod_time + max_age) > current_time:
+ return True
+
+ return False
+
+ def get_all_data_from_cache(self, filename=''):
+ ''' Reads the JSON inventory from the cache file. Returns Python dictionary. '''
+
+ data = ''
+ if not filename:
+ filename = self.cache_path_cache
+ with open(filename, 'r') as cache:
+ data = cache.read()
+ return json.loads(data)
+
+ def write_to_cache(self, data, filename=''):
+ ''' Writes data to file as JSON. Returns True. '''
+ if not filename:
+ filename = self.cache_path_cache
+ json_data = json.dumps(data)
+ with open(filename, 'w') as cache:
+ cache.write(json_data)
+ return True
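Editor's note: a minimal sketch of how the CloudInventoryCache defined above is used, mirroring the constructor logic further down in this hunk: check validity, then read from or write to the JSON cache file. The cache name, path and max age here are illustrative; the real values come from gce.ini later in this patch.

    # Illustrative use of CloudInventoryCache; name/path/max age are examples.
    cache = CloudInventoryCache(cache_name='ansible-gce.cache',
                                cache_path='~/.ansible/tmp',
                                cache_max_age=300)
    if cache.is_valid():                          # file exists and is younger than 300s
        inventory = cache.get_all_data_from_cache()
    else:
        inventory = {'_meta': {'hostvars': {}}}   # stand-in for a fresh API listing
        cache.write_to_cache(inventory)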
class GceInventory(object):
def __init__(self):
+ # Cache object
+ self.cache = None
+ # dictionary containing inventory read from disk
+ self.inventory = {}
+
# Read settings and parse CLI arguments
self.parse_cli_args()
+ self.config = self.get_config()
self.driver = self.get_gce_driver()
+ self.ip_type = self.get_inventory_options()
+ if self.ip_type:
+ self.ip_type = self.ip_type.lower()
+
+ # Cache management
+ start_inventory_time = time()
+ cache_used = False
+ if self.args.refresh_cache or not self.cache.is_valid():
+ self.do_api_calls_update_cache()
+ else:
+ self.load_inventory_from_cache()
+ cache_used = True
+ self.inventory['_meta']['stats'] = {'use_cache': True}
+ self.inventory['_meta']['stats'] = {
+ 'inventory_load_time': time() - start_inventory_time,
+ 'cache_used': cache_used
+ }
# Just display data for specific host
if self.args.host:
- print(self.json_format_dict(self.node_to_dict(
- self.get_instance(self.args.host)),
- pretty=self.args.pretty))
- sys.exit(0)
-
- # Otherwise, assume user wants all instances grouped
- print(self.json_format_dict(self.group_instances(),
- pretty=self.args.pretty))
+ print(self.json_format_dict(
+ self.inventory['_meta']['hostvars'][self.args.host],
+ pretty=self.args.pretty))
+ else:
+ # Otherwise, assume user wants all instances grouped
+ zones = self.parse_env_zones()
+ print(self.json_format_dict(self.inventory,
+ pretty=self.args.pretty))
sys.exit(0)
- def get_gce_driver(self):
- """Determine the GCE authorization settings and return a
- libcloud driver.
+ def get_config(self):
+ """
+ Reads the settings from the gce.ini file.
+
+ Populates a SafeConfigParser object with defaults and
+ attempts to read an .ini-style configuration from the filename
+ specified in GCE_INI_PATH. If the environment variable is
+ not present, the filename defaults to gce.ini in the current
+ working directory.
"""
gce_ini_default_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "gce.ini")
@@ -140,14 +219,57 @@ class GceInventory(object):
'gce_service_account_pem_file_path': '',
'gce_project_id': '',
'libcloud_secrets': '',
+ 'inventory_ip_type': '',
+ 'cache_path': '~/.ansible/tmp',
+ 'cache_max_age': '300'
})
if 'gce' not in config.sections():
config.add_section('gce')
+ if 'inventory' not in config.sections():
+ config.add_section('inventory')
+ if 'cache' not in config.sections():
+ config.add_section('cache')
+
config.read(gce_ini_path)
+ #########
+ # Section added for processing ini settings
+ #########
+
+ # Set the instance_states filter based on config file options
+ self.instance_states = []
+ if config.has_option('gce', 'instance_states'):
+ states = config.get('gce', 'instance_states')
+ # Ignore if instance_states is an empty string.
+ if states:
+ self.instance_states = states.split(',')
+
+ # Caching
+ cache_path = config.get('cache', 'cache_path')
+ cache_max_age = config.getint('cache', 'cache_max_age')
+        # TODO(supertom): support project-specific caches
+ cache_name = 'ansible-gce.cache'
+ self.cache = CloudInventoryCache(cache_path=cache_path,
+ cache_max_age=cache_max_age,
+ cache_name=cache_name)
+ return config
+
+ def get_inventory_options(self):
+ """Determine inventory options. Environment variables always
+ take precedence over configuration files."""
+ ip_type = self.config.get('inventory', 'inventory_ip_type')
+ # If the appropriate environment variables are set, they override
+ # other configuration
+ ip_type = os.environ.get('INVENTORY_IP_TYPE', ip_type)
+ return ip_type
+
+ def get_gce_driver(self):
+ """Determine the GCE authorization settings and return a
+ libcloud driver.
+ """
# Attempt to get GCE params from a configuration file, if one
# exists.
- secrets_path = config.get('gce', 'libcloud_secrets')
+ secrets_path = self.config.get('gce', 'libcloud_secrets')
secrets_found = False
try:
import secrets
@@ -161,8 +283,7 @@ class GceInventory(object):
if not secrets_path.endswith('secrets.py'):
err = "Must specify libcloud secrets file as "
err += "/absolute/path/to/secrets.py"
- print(err)
- sys.exit(1)
+ sys.exit(err)
sys.path.append(os.path.dirname(secrets_path))
try:
import secrets
@@ -173,10 +294,10 @@ class GceInventory(object):
pass
if not secrets_found:
args = [
- config.get('gce','gce_service_account_email_address'),
- config.get('gce','gce_service_account_pem_file_path')
+ self.config.get('gce','gce_service_account_email_address'),
+ self.config.get('gce','gce_service_account_pem_file_path')
]
- kwargs = {'project': config.get('gce', 'gce_project_id')}
+ kwargs = {'project': self.config.get('gce', 'gce_project_id')}
# If the appropriate environment variables are set, they override
# other configuration; process those into our args and kwargs.
@@ -191,6 +312,14 @@ class GceInventory(object):
)
return gce
+ def parse_env_zones(self):
+        '''Returns a list of comma-separated zones parsed from the GCE_ZONE environment variable.
+        If provided, this is used to filter the results of the group_instances call.'''
+ import csv
+ reader = csv.reader([os.environ.get('GCE_ZONE',"")], skipinitialspace=True)
+ zones = [r for r in reader]
+ return [z for z in zones[0]]
+
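Editor's note: for illustration, a short sketch of what parse_env_zones() produces for a typical GCE_ZONE value; the zone names are placeholders:

    import csv
    import os

    os.environ['GCE_ZONE'] = 'us-central1-a, us-central1-b'   # placeholder value
    reader = csv.reader([os.environ.get('GCE_ZONE', '')], skipinitialspace=True)
    zones = list(reader)[0]
    # zones == ['us-central1-a', 'us-central1-b']; group_instances() below skips
    # any instance whose zone is not in this list.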
def parse_cli_args(self):
''' Command line argument processing '''
@@ -202,6 +331,9 @@ class GceInventory(object):
help='Get all information about an instance')
parser.add_argument('--pretty', action='store_true', default=False,
help='Pretty format (default: False)')
+ parser.add_argument(
+ '--refresh-cache', action='store_true', default=False,
+ help='Force refresh of cache by making API requests (default: False - use cache files)')
self.args = parser.parse_args()
@@ -211,11 +343,17 @@ class GceInventory(object):
if inst is None:
return {}
- if inst.extra['metadata'].has_key('items'):
+ if 'items' in inst.extra['metadata']:
for entry in inst.extra['metadata']['items']:
md[entry['key']] = entry['value']
net = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
+        # default to external IP unless the user has specified they prefer internal
+ if self.ip_type == 'internal':
+ ssh_host = inst.private_ips[0]
+ else:
+ ssh_host = inst.public_ips[0] if len(inst.public_ips) >= 1 else inst.private_ips[0]
+
return {
'gce_uuid': inst.uuid,
'gce_id': inst.id,
@@ -231,29 +369,67 @@ class GceInventory(object):
'gce_metadata': md,
'gce_network': net,
# Hosts don't have a public name, so we add an IP
- 'ansible_ssh_host': inst.public_ips[0] if len(inst.public_ips) >= 1 else inst.private_ips[0]
+ 'ansible_ssh_host': ssh_host
}
- def get_instance(self, instance_name):
- '''Gets details about a specific instance '''
+ def load_inventory_from_cache(self):
+ ''' Loads inventory from JSON on disk. '''
+
try:
- return self.driver.ex_get_node(instance_name)
+ self.inventory = self.cache.get_all_data_from_cache()
+ hosts = self.inventory['_meta']['hostvars']
except Exception as e:
- return None
-
- def group_instances(self):
+ print(
+ "Invalid inventory file %s. Please rebuild with -refresh-cache option."
+ % (self.cache.cache_path_cache))
+ raise
+
+ def do_api_calls_update_cache(self):
+ ''' Do API calls and save data in cache. '''
+ zones = self.parse_env_zones()
+ data = self.group_instances(zones)
+ self.cache.write_to_cache(data)
+ self.inventory = data
+
+ def list_nodes(self):
+ all_nodes = []
+ params, more_results = {'maxResults': 500}, True
+ while more_results:
+ self.driver.connection.gce_params=params
+ all_nodes.extend(self.driver.list_nodes())
+ more_results = 'pageToken' in params
+ return all_nodes
+
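Editor's note: list_nodes() above pages through results by handing a params dict to the libcloud driver, which is expected to write a pageToken back into it while more pages remain. A generic sketch of that pattern, with a hypothetical fetch_page() standing in for the driver call:

    # Generic pagination sketch; fetch_page() is hypothetical and is assumed to
    # keep a 'pageToken' entry in params only while more pages remain.
    def list_all(fetch_page):
        items, params, more = [], {'maxResults': 500}, True
        while more:
            items.extend(fetch_page(params))
            more = 'pageToken' in params
        return items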
+ def group_instances(self, zones=None):
'''Group all instances'''
groups = {}
meta = {}
meta["hostvars"] = {}
- for node in self.driver.list_nodes():
+ for node in self.list_nodes():
+
+ # This check filters on the desired instance states defined in the
+ # config file with the instance_states config option.
+ #
+ # If the instance_states list is _empty_ then _ALL_ states are returned.
+ #
+ # If the instance_states list is _populated_ then check the current
+ # state against the instance_states list
+ if self.instance_states and not node.extra['status'] in self.instance_states:
+ continue
+
name = node.name
meta["hostvars"][name] = self.node_to_dict(node)
zone = node.extra['zone'].name
- if groups.has_key(zone): groups[zone].append(name)
+
+ # To avoid making multiple requests per zone
+ # we list all nodes and then filter the results
+ if zones and zone not in zones:
+ continue
+
+ if zone in groups: groups[zone].append(name)
else: groups[zone] = [name]
tags = node.extra['tags']
@@ -262,25 +438,25 @@ class GceInventory(object):
tag = t[6:]
else:
tag = 'tag_%s' % t
- if groups.has_key(tag): groups[tag].append(name)
+ if tag in groups: groups[tag].append(name)
else: groups[tag] = [name]
net = node.extra['networkInterfaces'][0]['network'].split('/')[-1]
net = 'network_%s' % net
- if groups.has_key(net): groups[net].append(name)
+ if net in groups: groups[net].append(name)
else: groups[net] = [name]
machine_type = node.size
- if groups.has_key(machine_type): groups[machine_type].append(name)
+ if machine_type in groups: groups[machine_type].append(name)
else: groups[machine_type] = [name]
image = node.image and node.image or 'persistent_disk'
- if groups.has_key(image): groups[image].append(name)
+ if image in groups: groups[image].append(name)
else: groups[image] = [name]
status = node.extra['status']
stat = 'status_%s' % status.lower()
- if groups.has_key(stat): groups[stat].append(name)
+ if stat in groups: groups[stat].append(name)
else: groups[stat] = [name]
groups["_meta"] = meta
@@ -296,6 +472,6 @@ class GceInventory(object):
else:
return json.dumps(data)
-
# Run the script
-GceInventory()
+if __name__ == '__main__':
+ GceInventory()
diff --git a/inventory/libvirt/hosts/libvirt_generic.py b/inventory/libvirt/hosts/libvirt_generic.py
index ac2f0430a..d63e07b64 100755
--- a/inventory/libvirt/hosts/libvirt_generic.py
+++ b/inventory/libvirt/hosts/libvirt_generic.py
@@ -61,11 +61,11 @@ class LibvirtInventory(object):
self.parse_cli_args()
if self.args.host:
- print _json_format_dict(self.get_host_info(), self.args.pretty)
+ print(_json_format_dict(self.get_host_info(), self.args.pretty))
elif self.args.list:
- print _json_format_dict(self.get_inventory(), self.args.pretty)
+ print(_json_format_dict(self.get_inventory(), self.args.pretty))
else: # default action with no options
- print _json_format_dict(self.get_inventory(), self.args.pretty)
+ print(_json_format_dict(self.get_inventory(), self.args.pretty))
def read_settings(self):
''' Reads the settings from the libvirt.ini file '''
@@ -115,12 +115,12 @@ class LibvirtInventory(object):
conn = libvirt.openReadOnly(self.libvirt_uri)
if conn is None:
- print "Failed to open connection to %s" % self.libvirt_uri
+ print("Failed to open connection to %s" % self.libvirt_uri)
sys.exit(1)
domains = conn.listAllDomains()
if domains is None:
- print "Failed to list domains for connection %s" % self.libvirt_uri
+ print("Failed to list domains for connection %s" % self.libvirt_uri)
sys.exit(1)
for domain in domains:
diff --git a/library/modify_yaml.py b/library/modify_yaml.py
index 000c7d9a2..8706e80c2 100755
--- a/library/modify_yaml.py
+++ b/library/modify_yaml.py
@@ -6,6 +6,11 @@
import yaml
+# ignore pylint errors related to the module_utils import
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
+from ansible.module_utils.basic import * # noqa: F402,F403
+
+
DOCUMENTATION = '''
---
module: modify_yaml
@@ -21,19 +26,36 @@ EXAMPLES = '''
'''
-# pylint: disable=missing-docstring
def set_key(yaml_data, yaml_key, yaml_value):
+ ''' Updates a parsed yaml structure setting a key to a value.
+
+ :param yaml_data: yaml structure to modify.
+ :type yaml_data: dict
+ :param yaml_key: Key to modify.
+ :type yaml_key: mixed
+    :param yaml_value: Value to use for yaml_key.
+ :type yaml_value: mixed
+ :returns: Changes to the yaml_data structure
+ :rtype: dict(tuple())
+ '''
changes = []
ptr = yaml_data
+ final_key = yaml_key.split('.')[-1]
for key in yaml_key.split('.'):
- if key not in ptr and key != yaml_key.split('.')[-1]:
+ # Key isn't present and we're not on the final key. Set to empty dictionary.
+ if key not in ptr and key != final_key:
ptr[key] = {}
ptr = ptr[key]
- elif key == yaml_key.split('.')[-1]:
+ # Current key is the final key. Update value.
+ elif key == final_key:
if (key in ptr and module.safe_eval(ptr[key]) != yaml_value) or (key not in ptr): # noqa: F405
ptr[key] = yaml_value
changes.append((yaml_key, yaml_value))
else:
+            # The value at this key is None and we're not on the final key;
+            # replace it with an empty dictionary so we can descend into it.
+ if ptr[key] is None and key != final_key:
+ ptr[key] = {}
ptr = ptr[key]
return changes
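Editor's note: a minimal usage sketch of set_key() with arbitrary illustrative data (not taken from this module's EXAMPLES): dotted keys walk or create nested dictionaries, and the return value lists the (key, value) pairs that actually changed.

    doc = {'metadata': {}}
    changed = set_key(doc, 'metadata.labels.env', 'prod')
    # doc     == {'metadata': {'labels': {'env': 'prod'}}}
    # changed == [('metadata.labels.env', 'prod')]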
@@ -68,37 +90,30 @@ def main():
# pylint: disable=missing-docstring, unused-argument
def none_representer(dumper, data):
return yaml.ScalarNode(tag=u'tag:yaml.org,2002:null', value=u'')
+
yaml.add_representer(type(None), none_representer)
try:
-
- yaml_file = open(dest)
- yaml_data = yaml.safe_load(yaml_file.read())
- yaml_file.close()
+ with open(dest) as yaml_file:
+ yaml_data = yaml.safe_load(yaml_file.read())
changes = set_key(yaml_data, yaml_key, yaml_value)
if len(changes) > 0:
if backup:
module.backup_local(dest)
- yaml_file = open(dest, 'w')
- yaml_string = yaml.dump(yaml_data, default_flow_style=False)
- yaml_string = yaml_string.replace('\'\'', '""')
- yaml_file.write(yaml_string)
- yaml_file.close()
+ with open(dest, 'w') as yaml_file:
+ yaml_string = yaml.dump(yaml_data, default_flow_style=False)
+ yaml_string = yaml_string.replace('\'\'', '""')
+ yaml_file.write(yaml_string)
return module.exit_json(changed=(len(changes) > 0), changes=changes)
# ignore broad-except error to avoid stack trace to ansible user
# pylint: disable=broad-except
- except Exception, e:
- return module.fail_json(msg=str(e))
-
+ except Exception as error:
+ return module.fail_json(msg=str(error))
-# ignore pylint errors related to the module_utils import
-# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, wrong-import-position
-# import module snippets
-from ansible.module_utils.basic import * # noqa: F402,F403
if __name__ == '__main__':
main()
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 4961d23ef..0b7c44660 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -5,7 +5,7 @@
}
Name: openshift-ansible
-Version: 3.4.17
+Version: 3.5.1
Release: 1%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
@@ -15,7 +15,11 @@ BuildArch: noarch
Requires: ansible >= 2.2.0.0-1
Requires: python2
+Requires: python-six
+Requires: tar
Requires: openshift-ansible-docs = %{version}-%{release}
+Requires: java-1.8.0-openjdk-headless
+Requires: httpd-tools
%description
Openshift and Atomic Enterprise Ansible
@@ -249,6 +253,351 @@ Atomic OpenShift Utilities includes
%changelog
+* Wed Jan 18 2017 Scott Dodson <sdodson@redhat.com> 3.5.1-1
+- More reliable wait for master after full host reboot. (dgoodwin@redhat.com)
+- kubelet must have rw to cgroups for pod/qos cgroups to function
+ (decarr@redhat.com)
+- Adding a few updates for python27,35 compatibility (kwoodson@redhat.com)
+- update examples to cover build default/override configuration
+ (bparees@redhat.com)
+- Fix yaml lint in easy-mode playbook (tbielawa@redhat.com)
+- Removed trailing spaces from line #34 (kunallimaye@gmail.com)
+- Install subscription-manager to fix issue-3102 (kunallimaye@gmail.com)
+- Changing formatting for issue#2244 update (kunallimaye@gmail.com)
+- Addressing Travis errors (ewolinet@redhat.com)
+- Adding --verfiy to generate script. (kwoodson@redhat.com)
+- v1.3 Add RHAMP (sdodson@redhat.com)
+- Update v1.4 content, add api-gateway (sdodson@redhat.com)
+- Add v1.5 content (sdodson@redhat.com)
+- Update example sync script (sdodson@redhat.com)
+- use pod to generate keystores (#14) (jcantrill@users.noreply.github.com)
+- Ensure serial certificate generation for node and master certificates.
+ (abutcher@redhat.com)
+- [Cert Expiry] Add serial numbers, include example PBs, docs
+ (tbielawa@redhat.com)
+- properly set changes when oc apply (jcantril@redhat.com)
+- additional cr fixes (jcantril@redhat.com)
+- metrics fixes for yamlint (jcantril@redhat.com)
+- additional code reviews (jcantril@redhat.com)
+- set replicas to current value so not to disrupt current pods (#13)
+ (jcantrill@users.noreply.github.com)
+- User provided certs pushed from control. vars reorg (#12)
+ (jcantrill@users.noreply.github.com)
+- update vars to allow scaling of components (#9)
+ (jcantrill@users.noreply.github.com)
+- allow definition of cpu/memory limits/resources (#11)
+ (jcantrill@users.noreply.github.com)
+- rename variables to be less extraneous (#10)
+ (jcantrill@users.noreply.github.com)
+- copy admin cert for use in subsequent tasks (#8)
+ (jcantrill@users.noreply.github.com)
+- Add tasks to uninstall metrics (#7) (jcantrill@users.noreply.github.com)
+- Custom certificates (#5) (bbarcaro@redhat.com)
+- prefix vars with metrics role (#4) (jcantrill@users.noreply.github.com)
+- Bruno Barcarol Guimarães work to move metrics to ansible from deployer
+ (jcantril@redhat.com)
+- Adding oc_edit module to lib_openshift. (kwoodson@redhat.com)
+- Create individual serving cert and loopback kubeconfig for additional
+ masters. (abutcher@redhat.com)
+- add configuration for build default+overrides settings (bparees@redhat.com)
+- delete idempotent (ewolinet@redhat.com)
+- additional comments addressed (ewolinet@redhat.com)
+- Updating upgrade_logging to be more idempotent (ewolinet@redhat.com)
+- Using oc_apply task for idempotent (ewolinet@redhat.com)
+- Removing shell module calls and cleaning up changed (ewolinet@redhat.com)
+- lib_openshift modules. This is the first one. oc_route.
+ (kwoodson@redhat.com)
+- Updated modify_yaml with docstring and clarifications (smilner@redhat.com)
+- Rename subrole facts -> init (rhcarvalho@gmail.com)
+- Move Python modules into role (rhcarvalho@gmail.com)
+- Document playbook directories (rhcarvalho@gmail.com)
+- Document bin/cluster tool (rhcarvalho@gmail.com)
+- keys should be lowercase according to the spec (jf.cron0@gmail.com)
+- filter: Removed unused validation calls (smilner@redhat.com)
+- Updated initializer usage in filters (smilner@redhat.com)
+- fix when statement indentation, cast to bool (jf.cron0@gmail.com)
+- add openshift_facts as role dependency (jf.cron0@gmail.com)
+- Added setup.py to flake8 tests (smilner@redhat.com)
+- Do not default registry storage kind to 'nfs' when 'nfs' group exists.
+ (abutcher@redhat.com)
+- Fix inconsistent task name (rhcarvalho@gmail.com)
+- Reduce code duplication using variable (rhcarvalho@gmail.com)
+- Another proposed update to the issue template (tbielawa@redhat.com)
+- Replace custom variables with openshift_facts (rhcarvalho@gmail.com)
+- Catch DBus exceptions on class initialization (rhcarvalho@gmail.com)
+- addressing comments (ewolinet@redhat.com)
+- Move playbook to BYO (rhcarvalho@gmail.com)
+- Fix typo in inventory README.md (lberk@redhat.com)
+- Refactor preflight check into roles (rhcarvalho@gmail.com)
+- Make flake8 (py35) happy on bare except (rhcarvalho@gmail.com)
+- Make callback plugin an always-on aggregate plugin (rhcarvalho@gmail.com)
+- Add RPM checks as an adhoc playbook (rhcarvalho@gmail.com)
+- first swing at release version wording (timbielawa@gmail.com)
+- Correct tox to run on Travis (rteague@redhat.com)
+- Adding ability to systematically modify yaml from ansible.
+ (kwoodson@redhat.com)
+- oo_filters: Moved static methods to functions (smilner@redhat.com)
+- Correct return code compairison for yamllint (rteague@redhat.com)
+- Add a fact to select --evacuate or --drain based on your OCP version
+ (tbielawa@redhat.com)
+- Update branch status (sdodson@redhat.com)
+- rename openshift_metrics to openshift_hosted_metrics (jcantril@redhat.com)
+- Update aws dynamic inventory (lhuard@amadeus.com)
+- improve issue template (sdodson@redhat.com)
+- cleanup: Removed debug prints from tests (smilner@redhat.com)
+- remove debug statement from test (jdetiber@redhat.com)
+- Support openshift_node_port_range for configuring service NodePorts
+ (ccoleman@redhat.com)
+- Workaround for dnf+docker version race condition (smilner@redhat.com)
+- use etcdctl from the container when containerized=True (gscrivan@redhat.com)
+- Partial uninstall (sejug@redhat.com)
+- increase test coverage (jdetiber@redhat.com)
+- Update aws dynamic inventory (lhuard@amadeus.com)
+- update travis to use tox for utils (jdetiber@redhat.com)
+- More toxification (jdetiber@redhat.com)
+- add test for utils to bump coverage (jdetiber@redhat.com)
+- The scaleup subcommand does not support the unattended option
+ (tbielawa@redhat.com)
+- Move role dependencies out of playbooks for openshift_master, openshift_node
+ and openshift_hosted. (abutcher@redhat.com)
+- Remove unused file (rhcarvalho@gmail.com)
+- Remove unused file (rhcarvalho@gmail.com)
+- Remove spurious argument (rhcarvalho@gmail.com)
+- Fixing collision of system.admin cert generation (ewolinet@redhat.com)
+- minor updates for code reviews, remove unused params (jcantril@redhat.com)
+- Updating to use deployer pod to generate JKS chain instead
+ (ewolinet@redhat.com)
+- Creating openshift_logging role for deploying Aggregated Logging without a
+ deployer image (ewolinet@redhat.com)
+- Begin requiring Docker 1.12. (dgoodwin@redhat.com)
+
+* Mon Jan 09 2017 Scott Dodson <sdodson@redhat.com> 3.5.0-1
+- Update manpage version. (tbielawa@redhat.com)
+- Fix openshift_image_tag=latest. (abutcher@redhat.com)
+- Use registry.access.redhat.com/rhel7/etcd instead of etcd3
+ (sdodson@redhat.com)
+- Fix repo defaults (sdodson@redhat.com)
+- Use openshift.common.hostname when verifying API port available.
+ (abutcher@redhat.com)
+- Re-add when condition which was removed mistakenly in #3036
+ (maszulik@redhat.com)
+- logging-deployer pull fixes from origin-aggregated-logging/#317
+ (sdodson@redhat.com)
+- Don't upgrade etcd on atomic host, ever. (sdodson@redhat.com)
+- Change wording in the quick installer callback plugin (tbielawa@redhat.com)
+- Fix jsonpath expected output when checking registry volume secrets
+ (maszulik@redhat.com)
+- Enable repos defined in openshift_additional_repos by default
+ (sdodson@redhat.com)
+- Add required python-six package to installation (tbielawa@redhat.com)
+- Hush the sudo privs check in oo-installer (tbielawa@redhat.com)
+- Add future versions to openshift_facts (ccoleman@redhat.com)
+- Cast openshift_enable_origin_repo to bool. (abutcher@redhat.com)
+- Update CFME template to point to GA build (simaishi@redhat.com)
+- Update aoi manpage with correct operation count (tbielawa@redhat.com)
+- Add templates for CFME Beta pod images (simaishi@redhat.com)
+- Add osnl_volume_reclaim_policy variable to nfs_lvm role
+ (ando.roots@bigbank.ee)
+- remove duplicate filter name and oo_pdb (jdetiber@redhat.com)
+- remove old Ops tooling (jdetiber@redhat.com)
+- enable pip cache for travis (jdetiber@redhat.com)
+- python3 support, add tox for better local testing against multiple python
+ versions (jdetiber@redhat.com)
+- modify_yaml: handle None value during update. (abutcher@redhat.com)
+- Update the openshift-certificate-expiry README to reflect latest changes
+ (tbielawa@redhat.com)
+- Deprecate node 'evacuation' with 'drain' (tbielawa@redhat.com)
+- Add master config hook for 3.4 upgrade and fix facts ordering for config hook
+ run. (abutcher@redhat.com)
+- The next registry.access.redhat.com/rhel7/etcd image will be 3.0.15
+ (sdodson@redhat.com)
+- [uninstall] Remove excluder packages (sdodson@redhat.com)
+- Check embedded etcd certs now, too (tbielawa@redhat.com)
+- Include 'total' and 'ok' in check results (tbielawa@redhat.com)
+- Enable firewalld by default (rteague@redhat.com)
+- Fix access_modes initialization (luis.fernandezalvarez@epfl.ch)
+- Updated OpenShift Master iptables rules (rteague@redhat.com)
+- YAML Linting (rteague@redhat.com)
+- Make both backup and upgrade optional (sdodson@redhat.com)
+- [upgrades] Upgrade etcd by default (sdodson@redhat.com)
+- upgrades - Fix logic error about when to backup etcd (sdodson@redhat.com)
+- Limit node certificate SAN to node hostnames/ips. (abutcher@redhat.com)
+- Make 'cover-erase' a config file setting. Move VENT target to pre-req for all
+ ci-* targets (tbielawa@redhat.com)
+- Fixes to 'make ci' (tbielawa@redhat.com)
+- Resolved lint issues (rteague@redhat.com)
+- Minimum Ansible version check (rteague@redhat.com)
+- Removed verify_ansible_version playbook refs (rteague@redhat.com)
+- Fix coverage not appending new data (tbielawa@redhat.com)
+- Drop 3.2 upgrade playbooks. (dgoodwin@redhat.com)
+- Silence warnings when using rpm directly (dag@wieers.com)
+- Silence warnings when using rpm directly (dag@wieers.com)
+- Silence warnings when using rpm directly (dag@wieers.com)
+- Remove Hostname from 1.1 and 1.2 predicates (jdetiber@redhat.com)
+- Properly handle x.y.z formatted versions for openshift_release
+ (jdetiber@redhat.com)
+- etcd_upgrade: Simplify package installation (sdodson@redhat.com)
+- Speed up 'make ci' and trim the output (tbielawa@redhat.com)
+- add comments and remove debug code (jdetiber@redhat.com)
+- Pre-pull master/node/ovs images during upgrade. (dgoodwin@redhat.com)
+- Handle updating of scheduler config during upgrade (jdetiber@redhat.com)
+- Fix templating (jdetiber@redhat.com)
+- test updates (jdetiber@redhat.com)
+- Always install latest etcd for containerized hosts (sdodson@redhat.com)
+- etcd_upgrade : Use different variables for rpm vs container versions
+ (sdodson@redhat.com)
+- Switch back to using etcd rather than etcd3 (sdodson@redhat.com)
+- node_dnsmasq - restart dnsmasq if it's not currently running
+ (sdodson@redhat.com)
+- Conditionalize master config update for admission_plugin_config.
+ (abutcher@redhat.com)
+- upgrade_control_plane.yml: systemd_units.yaml nees the master facts
+ (mchappel@redhat.com)
+- openshift-master/restart : use openshift.common.hostname instead of
+ inventory_hostname (mchappel@redhat.com)
+- Update scheduler predicate/priorities vars (jdetiber@redhat.com)
+- fix tags (jdetiber@redhat.com)
+- openshift_node_dnsmasq - Remove strict-order option from dnsmasq
+ (sdodson@redhat.com)
+- Fix metricsPublicURL only being set correctly on first master.
+ (dgoodwin@redhat.com)
+- Explictly set etcd vars for byo scaleup (smunilla@redhat.com)
+- Cleanup ovs file and restart docker on every upgrade. (dgoodwin@redhat.com)
+- Sync latest image stream and templates for v1.3 and v1.4 (sdodson@redhat.com)
+- xpaas v1.3.5 (sdodson@redhat.com)
+- Ansible version check update (tbielawa@redhat.com)
+- allow 'latest' origin_image_tag (sjenning@redhat.com)
+- Remove duplicate when key (rteague@redhat.com)
+- refactor handling of scheduler defaults (jdetiber@redhat.com)
+- update tests and flake8/pylint fixes (jdetiber@redhat.com)
+- fix tagging (jdetiber@redhat.com)
+- do not report changed for group mapping (jdetiber@redhat.com)
+- fix selinux issues with etcd container (dusty@dustymabe.com)
+- etcd upgrade playbook is not currently applicable to embedded etcd installs
+ (sdodson@redhat.com)
+- Fix invalid embedded etcd fact in etcd upgrade playbook.
+ (dgoodwin@redhat.com)
+- Gracefully handle OpenSSL module absence (misc@redhat.com)
+- Refactored to use Ansible systemd module (rteague@redhat.com)
+- Updating docs for Ansible 2.2 requirements (rteague@redhat.com)
+- Fix the list done after cluster creation on libvirt and OpenStack
+ (lhuard@amadeus.com)
+- Set nameservers on DHCPv6 event (alexandre.lossent@cern.ch)
+- Systemd `systemctl show` workaround (rteague@redhat.com)
+- Verify the presence of dbus python binding (misc@redhat.com)
+- Update README.md (jf.cron0@gmail.com)
+- Reference master binaries when delegating from node hosts which may be
+ containerized. (abutcher@redhat.com)
+- Merge kube_admission_plugin_config with admission_plugin_config
+ (smunilla@redhat.com)
+- Added a BYO playbook for configuring NetworkManager on nodes
+ (skuznets@redhat.com)
+- Make the role work on F25 Cloud (misc@redhat.com)
+- Make os_firewall_manage_iptables run on python3 (misc@redhat.com)
+- Modified the error message being checked for (vishal.patil@nuagenetworks.net)
+- Only run tuned-adm if tuned exists. (dusty@dustymabe.com)
+- Delegate openshift_manage_node tasks to master host. (abutcher@redhat.com)
+- Fix rare failure to deploy new registry/router after upgrade.
+ (dgoodwin@redhat.com)
+- Refactor os_firewall role (rteague@redhat.com)
+- Allow ansible to continue when a node is unaccessible or fails.
+ (abutcher@redhat.com)
+- Create the file in two passes, atomicly copy it over (sdodson@redhat.com)
+- Escape LOGNAME variable according to GCE rules (jacek.suchenia@ocado.com)
+- node_dnsmasq -- Set dnsmasq as our only nameserver (sdodson@redhat.com)
+- Refactor to use Ansible package module (rteague@redhat.com)
+- Allow users to disable the origin repo creation (sdodson@redhat.com)
+- Fix yum/subman version check on Atomic. (dgoodwin@redhat.com)
+- Check for bad versions of yum and subscription-manager. (dgoodwin@redhat.com)
+- Corrected syntax and typos (rteague@redhat.com)
+- Fix GCE cluster creation (lhuard@amadeus.com)
+- Optimize the cloud-specific list.yml playbooks (lhuard@amadeus.com)
+- Added ip forwarding for nuage (vishal.patil@nuagenetworks.net)
+- Fix typo (sdodson@redhat.com)
+- Fix a few places where we're not specifying the admin kubeconfig
+ (sdodson@redhat.com)
+- Add rolebinding-reader (sdodson@redhat.com)
+- Add view permissions to hawkular sa (sdodson@redhat.com)
+- Use multiple '-v's when creating the metrics deployer command
+ (tbielawa@redhat.com)
+- Sync logging deployer changes from origin to enterprise (sdodson@redhat.com)
+- Docker daemon is started prematurely. (eric.mountain@amadeus.com)
+- Sync latest enterprise/metrics-deployer.yaml (sdodson@redhat.com)
+- Sync latest s2i content (sdodson@redhat.com)
+- Actually upgrade host etcdctl no matter what (sdodson@redhat.com)
+- Make etcd containerized upgrade stepwise (sdodson@redhat.com)
+- Fix commit-offsets in version detection for containerized installs
+ (tbielawa@redhat.com)
+- Fix HA upgrade when fact cache deleted. (dgoodwin@redhat.com)
+- Fix openshift_hosted_metrics_deployer_version set_fact. (abutcher@redhat.com)
+- Added dependency of os_firewall to docker role (rteague@redhat.com)
+- Add updates for containerized (sdodson@redhat.com)
+- Add etcd upgrade for RHEL and Fedora (sdodson@redhat.com)
+- Drop /etc/profile.d/etcdctl.sh (sdodson@redhat.com)
+- Move backups to a separate file for re-use (sdodson@redhat.com)
+- Uninstall etcd3 package (sdodson@redhat.com)
+- Resolve docker and iptables service dependencies (rteague@redhat.com)
+- Add Travis integration (rhcarvalho@gmail.com)
+- Default groups.oo_etcd_to_config when setting embedded_etcd in control plane
+ upgrade. (abutcher@redhat.com)
+- Enable quiet output for all a-o-i commands (tbielawa@redhat.com)
+- Update override cluster_hostname (smunilla@redhat.com)
+- Reconcile role bindings for jenkins pipeline during upgrade.
+ (dgoodwin@redhat.com)
+- Fix typos in openshift_facts gce cloud provider (sdodson@redhat.com)
+- Don't upgrade etcd on backup operations (sdodson@redhat.com)
+- Bump ansible requirement to 2.2.0.0-1 (GA) (sdodson@redhat.com)
+- Fix etcd backup failure due to corrupted facts. (dgoodwin@redhat.com)
+- Re-sync v1.4 image streams (andrew@andrewklau.com)
+- Revert "Revert openshift.node.nodename changes" (sdodson@redhat.com)
+- Change to allow cni deployments without openshift SDN (yfauser@vmware.com)
+- README: fix markdown formatting (rhcarvalho@gmail.com)
+- Create contribution guide (rhcarvalho@gmail.com)
+- Remove README_AEP.md (rhcarvalho@gmail.com)
+- Install flannel RPM on containerized but not atomic (sdodson@redhat.com)
+- README: move structure overview to the top (rhcarvalho@gmail.com)
+- README: cleanup setup steps (rhcarvalho@gmail.com)
+- README: remove OSX setup requirements (rhcarvalho@gmail.com)
+- Add missing symlink for node openvswitch oom fix. (dgoodwin@redhat.com)
+- README: improve first paragraph (rhcarvalho@gmail.com)
+- README: add links, fix typos (rhcarvalho@gmail.com)
+- README: improve markdown formatting (rhcarvalho@gmail.com)
+- Make it easier to run Python tests (rhcarvalho@gmail.com)
+- FIx flannel var name (jprovazn@redhat.com)
+- Always add local dns domain to no_proxy (jawed.khelil@amadeus.com)
+- Refactor default sdn_cluster_network_cidr and sdn_host_subnet_length
+ (sdodson@redhat.com)
+- Revert "Fix the nodeName of the OpenShift nodes on OpenStack"
+ (sdodson@redhat.com)
+- Revert "Fix OpenStack cloud provider" (sdodson@redhat.com)
+- Revert "Check that OpenStack hostnames are resolvable" (sdodson@redhat.com)
+- set AWS creds task with no_logs (somalley@redhat.com)
+- Change the logic to just compare against masters and nodes.
+ (tbielawa@redhat.com)
+- Append /inventory/README.md to explain what is BYO inventory folder #2742
+ (contact@stephane-klein.info)
+- Remove unused openshift-ansible/inventory/hosts file #2740 (contact@stephane-
+ klein.info)
+- Remove unused playbooks adhoc metrics_setup files #2717 (contact@stephane-
+ klein.info)
+- a-o-i: remove dummy data_file (rhcarvalho@gmail.com)
+- a-o-i: remove script leftover from OpenShift v2 (rhcarvalho@gmail.com)
+- [openstack] allows timeout option for heat create stack
+ (douglaskippsmith@gmail.com)
+- [openstack] updates documentation to show that you need to install shade
+ (douglaskippsmith@gmail.com)
+- default to multizone GCE config (sjenning@redhat.com)
+- Add some tests for utils to get the coverage up. (tbielawa@redhat.com)
+- Update defaults for clusterNetworkCIDR & hostSubnetLength
+ (smunilla@redhat.com)
+- Add hawkular admin cluster role to management admin (fsimonce@redhat.com)
+- Prevent useless master by reworking template for master service enf file
+ (jkhelil@gmail.com)
+- support 3rd party scheduler (jannleno1@gmail.com)
+- Add nuage rest server port to haproxy firewall rules. (abutcher@redhat.com)
+- Port openshift_facts to py3 (misc@redhat.com)
+- storage/nfs_lvm: Also export as ReadWriteOnce (walters@verbum.org)
+
* Fri Nov 04 2016 Scott Dodson <sdodson@redhat.com> 3.4.17-1
- Fix indentation for flannel etcd vars (smunilla@redhat.com)
- Update hosted_templates (sdodson@redhat.com)
diff --git a/playbooks/README.md b/playbooks/README.md
new file mode 100644
index 000000000..5857a9f59
--- /dev/null
+++ b/playbooks/README.md
@@ -0,0 +1,19 @@
+# openshift-ansible playbooks
+
+In summary:
+
+- [`byo`](byo) (_Bring Your Own_ hosts) has the most actively maintained
+  playbooks for installing, upgrading and performing other tasks on OpenShift
+ clusters.
+- [`common`](common) has a set of playbooks that are included by playbooks in
+ `byo` and others.
+
+And:
+
+- [`adhoc`](adhoc) is a generic home for playbooks and tasks that are community
+ supported and not officially maintained.
+- [`aws`](aws), [`gce`](gce), [`libvirt`](libvirt) and [`openstack`](openstack)
+  are related to the [`bin/cluster`](../bin) tool and their usage is deprecated.
+
+Refer to the `README.md` file in each playbook directory for more information
+about them.
diff --git a/playbooks/adhoc/README.md b/playbooks/adhoc/README.md
new file mode 100644
index 000000000..69b9d3135
--- /dev/null
+++ b/playbooks/adhoc/README.md
@@ -0,0 +1,5 @@
+# _Ad hoc_ playbooks
+
+This directory holds playbooks and tasks that really don't have a better home.
+Existing playbooks living here are community supported and not officially
+maintained.
diff --git a/playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py b/playbooks/adhoc/grow_docker_vg/filter_plugins/grow_docker_vg_filters.py
index c19274e06..daff68fbe 100644
--- a/playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py
+++ b/playbooks/adhoc/grow_docker_vg/filter_plugins/grow_docker_vg_filters.py
@@ -5,22 +5,11 @@
Custom filters for use in openshift-ansible
'''
-import pdb
-
class FilterModule(object):
''' Custom ansible filters '''
@staticmethod
- def oo_pdb(arg):
- ''' This pops you into a pdb instance where arg is the data passed in
- from the filter.
- Ex: "{{ hostvars | oo_pdb }}"
- '''
- pdb.set_trace()
- return arg
-
- @staticmethod
def translate_volume_name(volumes, target_volume):
'''
This filter matches a device string /dev/sdX to /dev/xvdX
diff --git a/playbooks/adhoc/noc/create_host.yml b/playbooks/adhoc/noc/create_host.yml
deleted file mode 100644
index 318396bcc..000000000
--- a/playbooks/adhoc/noc/create_host.yml
+++ /dev/null
@@ -1,58 +0,0 @@
----
-- name: 'Create a host object in zabbix'
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- roles:
- - os_zabbix
- post_tasks:
-
- - zbxapi:
- server: https://noc2.ops.rhcloud.com/zabbix/api_jsonrpc.php
- zbx_class: Template
- state: list
- params:
- host: ctr_test_kwoodson
- filter:
- host:
- - ctr_kwoodson_test_tmpl
-
- register: tmpl_results
-
- - debug: var=tmpl_results
-
-#ansible-playbook -e 'oo_desc=kwoodson test' -e 'oo_name=kwoodson test name' -e 'oo_start=1435715357' -e 'oo_stop=1435718985' -e 'oo_hostids=11549' create_maintenance.yml
-- name: 'Create a host object in zabbix'
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- roles:
- - os_zabbix
- post_tasks:
-
- - zbxapi:
- server: https://noc2.ops.rhcloud.com/zabbix/api_jsonrpc.php
- zbx_class: Host
- state: absent
- params:
- host: ctr_test_kwoodson
- interfaces:
- - type: 1
- main: 1
- useip: 1
- ip: 127.0.0.1
- dns: ""
- port: 10050
- groups:
- - groupid: 1
- templates: "{{ tmpl_results.results | oo_collect('templateid') | oo_build_zabbix_list_dict('templateid') }}"
- output: extend
- filter:
- host:
- - ctr_test_kwoodson
-
- register: host_results
-
- - debug: var=host_results
diff --git a/playbooks/adhoc/noc/create_maintenance.yml b/playbooks/adhoc/noc/create_maintenance.yml
deleted file mode 100644
index b694aea1b..000000000
--- a/playbooks/adhoc/noc/create_maintenance.yml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-#ansible-playbook -e 'oo_desc=kwoodson test' -e 'oo_name=kwoodson test name' -e 'oo_start=1435715357' -e 'oo_stop=1435718985' -e 'oo_hostids=11549' create_maintenance.yml
-- name: 'Create a maintenace object in zabbix'
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- roles:
- - os_zabbix
- vars:
- oo_hostids: ''
- oo_groupids: ''
- post_tasks:
- - assert:
- that: oo_desc is defined
-
- - zbxapi:
- server: https://noc2.ops.rhcloud.com/zabbix/api_jsonrpc.php
- zbx_class: Maintenance
- state: present
- params:
- name: "{{ oo_name }}"
- description: "{{ oo_desc }}"
- active_since: "{{ oo_start }}"
- active_till: "{{ oo_stop }}"
- maintenance_type: "0"
- output: extend
- hostids: "{{ oo_hostids.split(',') | default([]) }}"
- #groupids: "{{ oo_groupids.split(',') | default([]) }}"
- timeperiods:
- - start_time: "{{ oo_start }}"
- period: "{{ oo_stop }}"
- selectTimeperiods: extend
-
- register: maintenance
-
- - debug: var=maintenance
diff --git a/playbooks/adhoc/noc/get_zabbix_problems.yml b/playbooks/adhoc/noc/get_zabbix_problems.yml
deleted file mode 100644
index 32fc7ce68..000000000
--- a/playbooks/adhoc/noc/get_zabbix_problems.yml
+++ /dev/null
@@ -1,43 +0,0 @@
----
-- name: 'Get current hosts who have triggers that are alerting by trigger description'
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- roles:
- - os_zabbix
- post_tasks:
- - assert:
- that: oo_desc is defined
-
- - zbxapi:
- server: https://noc2.ops.rhcloud.com/zabbix/api_jsonrpc.php
- zbx_class: Trigger
- state: list
- params:
- only_true: true
- output: extend
- selectHosts: extend
- searchWildCardsEnabled: 1
- search:
- description: "{{ oo_desc }}"
- register: problems
-
- - debug: var=problems
-
- - set_fact:
- problem_hosts: "{{ problems.results | oo_collect(attribute='hosts') | oo_flatten | oo_collect(attribute='host') | difference(['aggregates']) }}"
-
- - debug: var=problem_hosts
-
- - add_host:
- name: "{{ item }}"
- groups: problem_hosts_group
- with_items: "{{ problem_hosts }}"
-
-- name: "Run on problem hosts"
- hosts: problem_hosts_group
- gather_facts: no
- tasks:
- - command: "{{ oo_cmd }}"
- when: oo_cmd is defined
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index bdd92a47d..f0cfa7f55 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -75,6 +75,10 @@
- hosts: nodes
become: yes
+ vars:
+ node_dirs:
+ - "/etc/origin"
+ - "/var/lib/origin"
tasks:
- name: unmask services
command: systemctl unmask "{{ item }}"
@@ -83,59 +87,66 @@
with_items:
- firewalld
- - name: Remove packages
- package: name={{ item }} state=absent
- when: not is_atomic | bool
- with_items:
- - atomic-enterprise
- - atomic-enterprise-node
- - atomic-enterprise-sdn-ovs
- - atomic-openshift
- - atomic-openshift-clients
- - atomic-openshift-node
- - atomic-openshift-sdn-ovs
- - cockpit-bridge
- - cockpit-docker
- - cockpit-shell
- - cockpit-ws
- - kubernetes-client
- - openshift
- - openshift-node
- - openshift-sdn
- - openshift-sdn-ovs
- - openvswitch
- - origin
- - origin-clients
- - origin-node
- - origin-sdn-ovs
- - tuned-profiles-atomic-enterprise-node
- - tuned-profiles-atomic-openshift-node
- - tuned-profiles-openshift-node
- - tuned-profiles-origin-node
-
- - name: Remove flannel package
- package: name=flannel state=absent
- when: openshift_use_flannel | default(false) | bool and not is_atomic | bool
-
- - shell: systemctl reset-failed
- changed_when: False
-
- - shell: systemctl daemon-reload
- changed_when: False
-
- - name: Remove br0 interface
- shell: ovs-vsctl del-br br0
- changed_when: False
- failed_when: False
-
- - name: Remove linux interfaces
- shell: ip link del "{{ item }}"
- changed_when: False
- failed_when: False
- with_items:
- - lbr0
- - vlinuxbr
- - vovsbr
+ - block:
+ - block:
+ - name: Remove packages
+ package: name={{ item }} state=absent
+ with_items:
+ - atomic-enterprise
+ - atomic-enterprise-node
+ - atomic-enterprise-sdn-ovs
+ - atomic-openshift
+ - atomic-openshift-clients
+ - atomic-openshift-excluder
+ - atomic-openshift-docker-excluder
+ - atomic-openshift-node
+ - atomic-openshift-sdn-ovs
+ - cockpit-bridge
+ - cockpit-docker
+ - cockpit-shell
+ - cockpit-ws
+ - kubernetes-client
+ - openshift
+ - openshift-node
+ - openshift-sdn
+ - openshift-sdn-ovs
+ - openvswitch
+ - origin
+ - origin-excluder
+ - origin-docker-excluder
+ - origin-clients
+ - origin-node
+ - origin-sdn-ovs
+ - tuned-profiles-atomic-enterprise-node
+ - tuned-profiles-atomic-openshift-node
+ - tuned-profiles-openshift-node
+ - tuned-profiles-origin-node
+
+ - name: Remove flannel package
+ package: name=flannel state=absent
+ when: openshift_use_flannel | default(false) | bool
+ when: "{{ not is_atomic | bool }}"
+
+ - shell: systemctl reset-failed
+ changed_when: False
+
+ - shell: systemctl daemon-reload
+ changed_when: False
+
+ - name: Remove br0 interface
+ shell: ovs-vsctl del-br br0
+ changed_when: False
+ failed_when: False
+
+ - name: Remove linux interfaces
+ shell: ip link del "{{ item }}"
+ changed_when: False
+ failed_when: False
+ with_items:
+ - lbr0
+ - vlinuxbr
+ - vovsbr
+ when: "{{ openshift_remove_all | default(true) | bool }}"
- shell: find /var/lib/origin/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
changed_when: False
@@ -172,28 +183,57 @@
failed_when: False
with_items: "{{ exited_containers_to_delete.results }}"
- - shell: docker images | egrep {{ item }} | awk '{ print $3 }'
- changed_when: False
- failed_when: False
- register: images_to_delete
+ - block:
+ - block:
+ - shell: docker images | egrep {{ item }} | awk '{ print $3 }'
+ changed_when: False
+ failed_when: False
+ register: images_to_delete
+ with_items:
+ - registry\.access\..*redhat\.com/openshift3
+ - registry\.access\..*redhat\.com/aep3
+ - registry\.qe\.openshift\.com/.*
+ - registry\.access\..*redhat\.com/rhel7/etcd
+ - docker.io/openshift
+
+ - shell: "docker rmi -f {{ item.stdout_lines | join(' ') }}"
+ changed_when: False
+ failed_when: False
+ with_items: "{{ images_to_delete.results }}"
+ when: "{{ openshift_uninstall_images | default(True) | bool }}"
+
+ - name: remove sdn drop files
+ file:
+ path: /run/openshift-sdn
+ state: absent
+
+ - name: Remove files owned by RPMs
+ file: path={{ item }} state=absent
+ with_items:
+ - /etc/sysconfig/openshift-node
+ - /etc/sysconfig/openvswitch
+ - /run/openshift-sdn
+ when: "{{ openshift_remove_all | default(True) | bool }}"
+
+ - find: path={{ item }} file_type=file
+ register: files
with_items:
- - registry\.access\..*redhat\.com/openshift3
- - registry\.access\..*redhat\.com/aep3
- - registry\.qe\.openshift\.com/.*
- - registry\.access\..*redhat\.com/rhel7/etcd
- - docker.io/openshift
- when: openshift_uninstall_images | default(True) | bool
-
- - shell: "docker rmi -f {{ item.stdout_lines | join(' ') }}"
- changed_when: False
- failed_when: False
- with_items: "{{ images_to_delete.results }}"
- when: openshift_uninstall_images | default(True) | bool
+ - "{{ node_dirs }}"
+
+ - find: path={{ item }} file_type=directory
+ register: directories
+ with_items:
+ - "{{ node_dirs }}"
- - name: Remove sdn drop files
- file:
- path: /run/openshift-sdn
- state: absent
+ - file: path={{ item.1.path }} state=absent
+ with_subelements:
+ - "{{ files.results | default([]) }}"
+ - files
+
+ - file: path={{ item.1.path }} state=absent
+ with_subelements:
+ - "{{ directories.results | default([]) }}"
+ - files
- name: Remove remaining files
file: path={{ item }} state=absent
@@ -205,13 +245,10 @@
- /etc/NetworkManager/dispatcher.d/99-origin-dns.sh
- /etc/openshift
- /etc/openshift-sdn
- - /etc/origin
- /etc/sysconfig/atomic-enterprise-node
- /etc/sysconfig/atomic-openshift-node
- /etc/sysconfig/atomic-openshift-node-dep
- - /etc/sysconfig/openshift-node
- /etc/sysconfig/openshift-node-dep
- - /etc/sysconfig/openvswitch
- /etc/sysconfig/origin-node
- /etc/sysconfig/origin-node
- /etc/sysconfig/origin-node-dep
@@ -223,10 +260,8 @@
- /etc/systemd/system/origin-node-dep.service
- /etc/systemd/system/origin-node.service
- /etc/systemd/system/origin-node.service.wants
- - /run/openshift-sdn
- /var/lib/atomic-enterprise
- /var/lib/openshift
- - /var/lib/origin
- name: restart docker
service: name=docker state=restarted
@@ -234,9 +269,12 @@
- name: restart NetworkManager
service: name=NetworkManager state=restarted
-
- hosts: masters
become: yes
+ vars:
+ master_dirs:
+ - "/etc/origin"
+ - "/var/lib/origin"
tasks:
- name: unmask services
command: systemctl unmask "{{ item }}"
@@ -248,12 +286,14 @@
- name: Remove packages
package: name={{ item }} state=absent
- when: not is_atomic | bool
+ when: not is_atomic | bool and openshift_remove_all | default(True) | bool
with_items:
- atomic-enterprise
- atomic-enterprise-master
- atomic-openshift
- atomic-openshift-clients
+ - atomic-openshift-excluder
+ - atomic-openshift-docker-excluder
- atomic-openshift-master
- cockpit-bridge
- cockpit-docker
@@ -265,6 +305,8 @@
- openshift-master
- origin
- origin-clients
+ - origin-excluder
+ - origin-docker-excluder
- origin-master
- pacemaker
- pcs
@@ -275,6 +317,33 @@
- shell: systemctl daemon-reload
changed_when: False
+ - name: Remove files owned by RPMs
+ file: path={{ item }} state=absent
+ when: openshift_remove_all | default(True) | bool
+ with_items:
+ - /etc/sysconfig/atomic-openshift-master
+ - /etc/sysconfig/openvswitch
+
+ - find: path={{ item }} file_type=file
+ register: files
+ with_items:
+ - "{{ master_dirs }}"
+
+ - find: path={{ item }} file_type=directory
+ register: directories
+ with_items:
+ - "{{ master_dirs }}"
+
+ - file: path={{ item.1.path }} state=absent
+ with_subelements:
+ - "{{ files.results | default([]) }}"
+ - files
+
+ - file: path={{ item.1.path }} state=absent
+ with_subelements:
+ - "{{ directories.results | default([]) }}"
+ - files
+
- name: Remove remaining files
file: path={{ item }} state=absent
with_items:
@@ -284,7 +353,6 @@
- /etc/corosync
- /etc/openshift
- /etc/openshift-sdn
- - /etc/origin
- /etc/systemd/system/atomic-openshift-master.service
- /etc/systemd/system/atomic-openshift-master-api.service
- /etc/systemd/system/atomic-openshift-master-controllers.service
@@ -295,14 +363,12 @@
- /etc/sysconfig/atomic-enterprise-master
- /etc/sysconfig/atomic-enterprise-master-api
- /etc/sysconfig/atomic-enterprise-master-controllers
- - /etc/sysconfig/atomic-openshift-master
- /etc/sysconfig/atomic-openshift-master-api
- /etc/sysconfig/atomic-openshift-master-controllers
- /etc/sysconfig/origin-master
- /etc/sysconfig/origin-master-api
- /etc/sysconfig/origin-master-controllers
- /etc/sysconfig/openshift-master
- - /etc/sysconfig/openvswitch
- /etc/sysconfig/origin-master
- /etc/sysconfig/origin-master-api
- /etc/sysconfig/origin-master-controllers
@@ -310,7 +376,6 @@
- /usr/share/openshift/examples
- /var/lib/atomic-enterprise
- /var/lib/openshift
- - /var/lib/origin
- /var/lib/pacemaker
- /var/lib/pcsd
- /usr/lib/systemd/system/atomic-openshift-master-api.service
@@ -331,6 +396,10 @@
- hosts: etcd
become: yes
+ vars:
+ etcd_dirs:
+ - "/etc/etcd"
+ - "/var/lib/etcd"
tasks:
- name: unmask services
command: systemctl unmask "{{ item }}"
@@ -350,7 +419,7 @@
- name: Remove packages
package: name={{ item }} state=absent
- when: not is_atomic | bool
+ when: not is_atomic | bool and openshift_remove_all | default(True) | bool
with_items:
- etcd
- etcd3
@@ -361,13 +430,25 @@
- shell: systemctl daemon-reload
changed_when: False
- - name: Remove remaining files
- file: path={{ item }} state=absent
+ - find: path={{ item }} file_type=file
+ register: files
with_items:
- - /etc/ansible/facts.d/openshift.fact
- - /etc/etcd
- - /etc/systemd/system/etcd_container.service
- - /etc/profile.d/etcdctl.sh
+ - "{{ etcd_dirs }}"
+
+ - find: path={{ item }} file_type=directory
+ register: directories
+ with_items:
+ - "{{ etcd_dirs }}"
+
+ - file: path={{ item.1.path }} state=absent
+ with_subelements:
+ - "{{ files.results | default([]) }}"
+ - files
+
+ - file: path={{ item.1.path }} state=absent
+ with_subelements:
+ - "{{ directories.results | default([]) }}"
+ - files
# Intentionally using the rm command over the file module because if someone had mounted a filesystem
# at /var/lib/etcd then the contents were not removed correctly
@@ -377,6 +458,13 @@
warn: no
failed_when: false
+ - name: Remove remaining files
+ file: path={{ item }} state=absent
+ with_items:
+ - /etc/ansible/facts.d/openshift.fact
+ - /etc/systemd/system/etcd_container.service
+ - /etc/profile.d/etcdctl.sh
+
- hosts: lb
become: yes
tasks:
@@ -389,7 +477,7 @@
- name: Remove packages
package: name={{ item }} state=absent
- when: not is_atomic | bool
+ when: not is_atomic | bool and openshift_remove_all | default(True) | bool
with_items:
- haproxy
@@ -403,4 +491,4 @@
file: path={{ item }} state=absent
with_items:
- /etc/ansible/facts.d/openshift.fact
- - /var/lib/haproxy
+ - /var/lib/haproxy/stats
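
The reworked uninstall tasks above stop deleting directories such as `/etc/origin` and `/var/lib/origin` outright; instead they enumerate the contents with `find` and delete those, so the directories themselves (and anything mounted on them) stay in place. A minimal standalone sketch of that pattern, using a hypothetical `dirs_to_empty` variable in place of the playbook's `node_dirs`/`master_dirs`/`etcd_dirs`:

```yaml
# Sketch only: empty the listed directories without removing the
# directories themselves (so mount points survive). `dirs_to_empty`
# is a hypothetical example variable.
- hosts: localhost
  become: yes
  vars:
    dirs_to_empty:
      - /etc/origin
      - /var/lib/origin
  tasks:
    - name: Find regular files under each directory
      find:
        path: "{{ item }}"
        file_type: file
      register: found_files
      with_items: "{{ dirs_to_empty }}"

    - name: Remove every file that was found
      file:
        path: "{{ item.1.path }}"
        state: absent
      with_subelements:
        - "{{ found_files.results | default([]) }}"
        - files
    # The same find/with_subelements pair is repeated with
    # file_type: directory to clear subdirectories as well.
```

The etcd play additionally falls back to `rm` for `/var/lib/etcd` because, as the comment in that section notes, the file module did not reliably clear a mounted filesystem.
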
diff --git a/playbooks/adhoc/zabbix_setup/clean_zabbix.yml b/playbooks/adhoc/zabbix_setup/clean_zabbix.yml
deleted file mode 100644
index 955f990b7..000000000
--- a/playbooks/adhoc/zabbix_setup/clean_zabbix.yml
+++ /dev/null
@@ -1,60 +0,0 @@
----
-- hosts: localhost
- gather_facts: no
- connection: local
- become: no
- vars:
- g_server: http://localhost:8080/zabbix/api_jsonrpc.php
- g_user: ''
- g_password: ''
-
- roles:
- - lib_zabbix
-
- post_tasks:
- - name: CLEAN List template for heartbeat
- zbx_template:
- zbx_server: "{{ g_server }}"
- zbx_user: "{{ g_user }}"
- zbx_password: "{{ g_password }}"
- state: list
- name: 'Template Heartbeat'
- register: templ_heartbeat
-
- - name: CLEAN List template app zabbix server
- zbx_template:
- zbx_server: "{{ g_server }}"
- zbx_user: "{{ g_user }}"
- zbx_password: "{{ g_password }}"
- state: list
- name: 'Template App Zabbix Server'
- register: templ_zabbix_server
-
- - name: CLEAN List template app zabbix server
- zbx_template:
- zbx_server: "{{ g_server }}"
- zbx_user: "{{ g_user }}"
- zbx_password: "{{ g_password }}"
- state: list
- name: 'Template App Zabbix Agent'
- register: templ_zabbix_agent
-
- - name: CLEAN List all templates
- zbx_template:
- zbx_server: "{{ g_server }}"
- zbx_user: "{{ g_user }}"
- zbx_password: "{{ g_password }}"
- state: list
- register: templates
-
- - debug: var=templ_heartbeat.results
-
- - name: Remove templates if heartbeat template is missing
- zbx_template:
- zbx_server: "{{ g_server }}"
- zbx_user: "{{ g_user }}"
- zbx_password: "{{ g_password }}"
- name: "{{ item }}"
- state: absent
- with_items: "{{ templates.results | difference(templ_zabbix_agent.results) | difference(templ_zabbix_server.results) | oo_collect('host') }}"
- when: templ_heartbeat.results | length == 0
diff --git a/playbooks/adhoc/zabbix_setup/filter_plugins b/playbooks/adhoc/zabbix_setup/filter_plugins
deleted file mode 120000
index b0b7a3414..000000000
--- a/playbooks/adhoc/zabbix_setup/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins/ \ No newline at end of file
diff --git a/playbooks/adhoc/zabbix_setup/oo-clean-zaio.yml b/playbooks/adhoc/zabbix_setup/oo-clean-zaio.yml
deleted file mode 100755
index 0fe65b338..000000000
--- a/playbooks/adhoc/zabbix_setup/oo-clean-zaio.yml
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/usr/bin/env ansible-playbook
----
-- include: clean_zabbix.yml
- vars:
- g_server: http://localhost/zabbix/api_jsonrpc.php
- g_user: Admin
- g_password: zabbix
diff --git a/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml b/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml
deleted file mode 100755
index 0d5e01878..000000000
--- a/playbooks/adhoc/zabbix_setup/oo-config-zaio.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/usr/bin/ansible-playbook
----
-- hosts: localhost
- gather_facts: no
- connection: local
- become: no
- vars:
- g_server: http://localhost/zabbix/api_jsonrpc.php
- g_user: Admin
- g_password: zabbix
- g_zbx_scriptrunner_user: scriptrunner
- g_zbx_scriptrunner_bastion_host: specialhost.example.com
- roles:
- - role: os_zabbix
- ozb_server: "{{ g_server }}"
- ozb_user: "{{ g_user }}"
- ozb_password: "{{ g_password }}"
- ozb_scriptrunner_user: "{{ g_zbx_scriptrunner_user }}"
- ozb_scriptrunner_bastion_host: "{{ g_zbx_scriptrunner_bastion_host }}"
diff --git a/playbooks/adhoc/zabbix_setup/roles b/playbooks/adhoc/zabbix_setup/roles
deleted file mode 120000
index 20c4c58cf..000000000
--- a/playbooks/adhoc/zabbix_setup/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../roles \ No newline at end of file
diff --git a/playbooks/aws/README.md b/playbooks/aws/README.md
new file mode 100644
index 000000000..99698b4d0
--- /dev/null
+++ b/playbooks/aws/README.md
@@ -0,0 +1,4 @@
+# AWS playbooks
+
+This playbook directory is meant to be driven by [`bin/cluster`](../../bin),
+which is community supported; most use of it is considered deprecated.
diff --git a/playbooks/byo/README.md b/playbooks/byo/README.md
new file mode 100644
index 000000000..460fd7cf6
--- /dev/null
+++ b/playbooks/byo/README.md
@@ -0,0 +1,11 @@
+# Bring Your Own hosts playbooks
+
+This directory has the most actively used, maintained and supported set of
+playbooks for installing, upgrading and performing other tasks on OpenShift
+clusters.
+
+Usage is documented in the official OpenShift documentation pages, under the
+Advanced Installation topic:
+
+- [OpenShift Origin: Advanced Installation](https://docs.openshift.org/latest/install_config/install/advanced_install.html)
+- [OpenShift Container Platform: Advanced Installation](https://docs.openshift.com/container-platform/latest/install_config/install/advanced_install.html)
diff --git a/playbooks/byo/openshift-cluster/openshift-logging.yml b/playbooks/byo/openshift-cluster/openshift-logging.yml
new file mode 100644
index 000000000..09ab91bbd
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/openshift-logging.yml
@@ -0,0 +1,35 @@
+---
+#
+# This playbook is a preview of upcoming changes for installing
+# Hosted logging. See inventory/byo/hosts.*.example for the
+# currently supported method.
+#
+- include: ../../common/openshift-cluster/verify_ansible_version.yml
+
+- name: Create initial host groups for localhost
+ hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tags:
+ - always
+ tasks:
+ - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
+ - add_host:
+ name: "{{ item }}"
+ groups: l_oo_all_hosts
+ with_items: "{{ g_all_hosts | default([]) }}"
+
+- name: Create initial host groups for all hosts
+ hosts: l_oo_all_hosts
+ gather_facts: no
+ tags:
+ - always
+ tasks:
+ - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
+
+- include: ../../common/openshift-cluster/openshift_logging.yml
+ vars:
+ openshift_cluster_id: "{{ cluster_id | default('default') }}"
+ openshift_debug_level: "{{ debug_level | default(2) }}"
+ openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
index 0d451cf77..dc0bf73a2 100644
--- a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
@@ -18,20 +18,20 @@
# If a node fails, halt everything; the admin will need to clean up and we
# don't want to carry on, potentially taking out every node. The playbook can safely be re-run
# and will not take any action on a node already running the requested docker version.
-- name: Evacuate and upgrade nodes
+- name: Drain and upgrade nodes
hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
serial: 1
any_errors_fatal: true
tasks:
- - name: Prepare for Node evacuation
+ - name: Prepare for Node draining
command: >
{{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --schedulable=false
delegate_to: "{{ groups.oo_first_master.0 }}"
when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
- - name: Evacuate Node for Kubelet upgrade
+ - name: Drain Node for Kubelet upgrade
command: >
- {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --evacuate --force
+ {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} {{ openshift.common.evacuate_or_drain }} --force
delegate_to: "{{ groups.oo_first_master.0 }}"
when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
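
The rename from "evacuate" to "drain" above tracks the change in the `oc adm manage-node` flag; `openshift.common.evacuate_or_drain` lets the same task work against both older and newer clients. A hypothetical sketch of how such a fact could be chosen per host (the real derivation lives in the `openshift_facts` role and is not shown in this diff; the variable name and version boundary below are assumptions):

```yaml
# Illustration only: choose the node-drain flag based on client version.
# `openshift_client_version` and the 1.5 boundary are hypothetical; the
# actual fact is computed inside the openshift_facts role.
- name: Choose drain or evacuate flag
  set_fact:
    evacuate_or_drain: "{{ '--drain' if (openshift_client_version | version_compare('1.5', '>=')) else '--evacuate' }}"
```

With such a fact in place, the drain task renders as `oc adm manage-node <node> --drain --force` on newer clients and `--evacuate --force` on older ones.
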
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml
index 561be7859..d337b6f75 100644
--- a/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml
@@ -1,7 +1,5 @@
---
# Playbook to upgrade Docker to the max allowable version for an OpenShift cluster.
-#
-# Currently only supports upgrading 1.9.x to >= 1.10.x.
- hosts: localhost
connection: local
become: no
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
index 4ce815271..84a5a026f 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
@@ -66,6 +66,10 @@
tags:
- pre_upgrade
+- include: ../../../../common/openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
index d6af71827..7717c95e4 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
@@ -71,6 +71,10 @@
tags:
- pre_upgrade
+- include: ../../../../common/openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml
index 496b00697..6b69348b7 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml
@@ -6,8 +6,8 @@
tags:
- pre_upgrade
-# Configure the upgrade target for the common upgrade tasks:
-- hosts: l_oo_all_hosts
+- name: Configure the upgrade target for the common upgrade tasks
+ hosts: l_oo_all_hosts
tags:
- pre_upgrade
tasks:
@@ -66,6 +66,10 @@
tags:
- pre_upgrade
+- include: ../../../../common/openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
@@ -89,6 +93,8 @@
- include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_4/master_config_upgrade.yml"
- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
index 8cde2ac88..92d7c943a 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
@@ -71,6 +71,10 @@
tags:
- pre_upgrade
+- include: ../../../../common/openshift-master/validate_restart.yml
+ tags:
+ - pre_upgrade
+
- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
@@ -94,5 +98,7 @@
- include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_4/master_config_upgrade.yml"
- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
diff --git a/playbooks/byo/openshift-master/restart.yml b/playbooks/byo/openshift-master/restart.yml
index 0a163526a..b60807a71 100644
--- a/playbooks/byo/openshift-master/restart.yml
+++ b/playbooks/byo/openshift-master/restart.yml
@@ -15,4 +15,16 @@
tasks:
- include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
-- include: ../../common/openshift-master/restart.yml
+- include: ../../common/openshift-cluster/evaluate_groups.yml
+- include: ../../common/openshift-master/validate_restart.yml
+
+- name: Restart masters
+ hosts: oo_masters_to_config
+ vars:
+ openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+ serial: 1
+ tasks:
+ - include: restart_hosts.yml
+ when: openshift.common.rolling_restart_mode == 'system'
+ - include: restart_services.yml
+ when: openshift.common.rolling_restart_mode == 'services'
diff --git a/playbooks/byo/openshift-preflight/README.md b/playbooks/byo/openshift-preflight/README.md
new file mode 100644
index 000000000..b50292eac
--- /dev/null
+++ b/playbooks/byo/openshift-preflight/README.md
@@ -0,0 +1,43 @@
+# OpenShift preflight checks
+
+Here we provide an Ansible playbook for detecting potential roadblocks prior to
+an install or upgrade.
+
+Ansible's default operation mode is to fail fast, on the first error. However,
+when performing checks, it is useful to gather as much information about
+problems as possible in a single run.
+
+The `check.yml` playbook runs a battery of checks against the inventory hosts
+and tells Ansible to ignore intermediate errors, thus giving a more complete
+diagnostic of the state of each host. Still, if any check fails, the playbook
+run will be marked as failed.
+
+To facilitate understanding the problems that were encountered, we provide a
+custom callback plugin to summarize execution errors at the end of a playbook
+run.
+
+---
+
+*Note that currently the `check.yml` playbook is only useful for RPM-based
+installations. Containerized installs are excluded from checks for now, but
+might be included in the future if there is demand for that.*
+
+---
+
+## Running
+
+With an installation of Ansible 2.2 or greater, run the playbook directly
+against your inventory file. Here are the steps:
+
+1. If you haven't done it yet, clone this repository:
+
+ ```console
+ $ git clone https://github.com/openshift/openshift-ansible
+ $ cd openshift-ansible
+ ```
+
+2. Run the playbook:
+
+ ```console
+ $ ansible-playbook -i <inventory file> playbooks/byo/openshift-preflight/check.yml
+ ```
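
The README describes the shape of the approach rather than the role internals: each check play ignores intermediate errors so every host gets inspected, and a final play fails the run if any host recorded a problem. A generic sketch of that pattern, with illustrative task and variable names (the real logic lives in the `openshift_preflight/*` roles shown in the playbook that follows):

```yaml
---
# Sketch of the "collect everything, fail at the end" pattern.
# Task contents and variable names are illustrative only.
- hosts: OSEv3
  gather_facts: no
  ignore_errors: yes
  tasks:
    - name: Example check - required package is installed
      command: rpm -q docker
      args:
        warn: no
      register: docker_rpm_check
      changed_when: false

    - name: Record the result for the final verification play
      set_fact:
        preflight_failed: "{{ docker_rpm_check.rc != 0 }}"

- hosts: OSEv3
  gather_facts: no
  tasks:
    - name: Fail the run if this host recorded a problem
      fail:
        msg: "Preflight checks failed on this host"
      when: preflight_failed | default(false) | bool
```
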
diff --git a/playbooks/byo/openshift-preflight/check.yml b/playbooks/byo/openshift-preflight/check.yml
new file mode 100644
index 000000000..32673d01d
--- /dev/null
+++ b/playbooks/byo/openshift-preflight/check.yml
@@ -0,0 +1,31 @@
+---
+- hosts: OSEv3
+ roles:
+ - openshift_preflight/init
+
+- hosts: OSEv3
+ name: checks that apply to all hosts
+ gather_facts: no
+ ignore_errors: yes
+ roles:
+ - openshift_preflight/common
+
+- hosts: masters
+ name: checks that apply to masters
+ gather_facts: no
+ ignore_errors: yes
+ roles:
+ - openshift_preflight/masters
+
+- hosts: nodes
+ name: checks that apply to nodes
+ gather_facts: no
+ ignore_errors: yes
+ roles:
+ - openshift_preflight/nodes
+
+- hosts: OSEv3
+ name: verify check results
+ gather_facts: no
+ roles:
+ - openshift_preflight/verify_status
diff --git a/playbooks/common/README.md b/playbooks/common/README.md
new file mode 100644
index 000000000..0b5e26989
--- /dev/null
+++ b/playbooks/common/README.md
@@ -0,0 +1,9 @@
+# Common playbooks
+
+This directory has a generic set of playbooks that are included by playbooks in
+[`byo`](../byo), as well as other playbooks related to the
+[`bin/cluster`](../../bin) tool.
+
+Note: playbooks in this directory use generic group names that do not line up
+with the groups used by the `byo` playbooks or `bin/cluster`-derived playbooks,
+requiring an explicit remapping of groups.
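
The remapping this note refers to is visible earlier in this diff: the `byo` wrappers read `cluster_hosts.yml` and feed `g_all_hosts` into a generic `l_oo_all_hosts` group before including the common playbook. A condensed sketch of that bridge, assuming the same variable and group names and the same relative location as the wrappers shown above:

```yaml
---
# Condensed from the byo wrapper pattern earlier in this diff: translate
# byo inventory vars into the generic groups the common/ playbooks expect.
- name: Build generic host groups from byo inventory
  hosts: localhost
  connection: local
  become: no
  gather_facts: no
  tasks:
    - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
    - add_host:
        name: "{{ item }}"
        groups: l_oo_all_hosts
      with_items: "{{ g_all_hosts | default([]) }}"

- include: ../../common/openshift-cluster/openshift_logging.yml
```
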
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index 0f226f5f9..a95cb68b7 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -38,6 +38,9 @@
- set_fact:
openshift_docker_log_options: "{{ lookup('oo_option', 'docker_log_options') }}"
when: openshift_docker_log_options is not defined
+ - set_fact:
+ openshift_docker_selinux_enabled: "{{ lookup('oo_option', 'docker_selinux_enabled') }}"
+ when: openshift_docker_selinux_enabled is not defined
- include: ../openshift-etcd/config.yml
tags:
diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml
index cd2f2e6aa..ec5b18389 100644
--- a/playbooks/common/openshift-cluster/openshift_hosted.yml
+++ b/playbooks/common/openshift-cluster/openshift_hosted.yml
@@ -26,29 +26,8 @@
logging_elasticsearch_cluster_size: "{{ openshift_hosted_logging_elasticsearch_cluster_size | default(1) }}"
logging_elasticsearch_ops_cluster_size: "{{ openshift_hosted_logging_elasticsearch_ops_cluster_size | default(1) }}"
roles:
- - role: openshift_cli
- - role: openshift_hosted_facts
- - role: openshift_projects
- # TODO: Move standard project definitions to openshift_hosted/vars/main.yml
- # Vars are not accessible in meta/main.yml in ansible-1.9.x
- openshift_projects: "{{ openshift_additional_projects | default({}) | oo_merge_dicts({'default':{'default_node_selector':''},'openshift-infra':{'default_node_selector':''},'logging':{'default_node_selector':''}}) }}"
- - role: openshift_serviceaccounts
- openshift_serviceaccounts_names:
- - router
- openshift_serviceaccounts_namespace: default
- openshift_serviceaccounts_sccs:
- - hostnetwork
- when: openshift.common.version_gte_3_2_or_1_2
- - role: openshift_serviceaccounts
- openshift_serviceaccounts_names:
- - router
- - registry
- openshift_serviceaccounts_namespace: default
- openshift_serviceaccounts_sccs:
- - privileged
- when: not openshift.common.version_gte_3_2_or_1_2
- role: openshift_hosted
- - role: openshift_metrics
+ - role: openshift_hosted_metrics
when: openshift_hosted_metrics_deploy | default(false) | bool
- role: openshift_hosted_logging
when: openshift_hosted_logging_deploy | default(false) | bool
diff --git a/playbooks/common/openshift-cluster/openshift_logging.yml b/playbooks/common/openshift-cluster/openshift_logging.yml
new file mode 100644
index 000000000..6347cbc26
--- /dev/null
+++ b/playbooks/common/openshift-cluster/openshift_logging.yml
@@ -0,0 +1,5 @@
+---
+- name: OpenShift Aggregated Logging
+ hosts: oo_first_master
+ roles:
+ - openshift_logging
diff --git a/playbooks/common/openshift-cluster/openshift_metrics.yml b/playbooks/common/openshift-cluster/openshift_metrics.yml
new file mode 100644
index 000000000..9f38ceea6
--- /dev/null
+++ b/playbooks/common/openshift-cluster/openshift_metrics.yml
@@ -0,0 +1,5 @@
+---
+- name: OpenShift Metrics
+ hosts: oo_first_master
+ roles:
+ - openshift_metrics
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates.yml b/playbooks/common/openshift-cluster/redeploy-certificates.yml
index 5f008a045..2383836d4 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates.yml
@@ -108,10 +108,6 @@
| oo_select_keys(groups['oo_etcd_to_config'] | default([]))
| oo_collect('openshift.common.hostname')
| default(none, true) }}"
- openshift_master_hostnames: "{{ hostvars
- | oo_select_keys(groups['oo_masters_to_config'] | default([]))
- | oo_collect('openshift.common.all_hostnames')
- | oo_flatten | unique }}"
openshift_certificates_redeploy: true
- role: openshift_etcd_client_certificates
etcd_certificates_redeploy: true
@@ -204,7 +200,7 @@
cp {{ openshift.common.config_base }}/master//admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
changed_when: False
-- name: Serially evacuate all nodes to trigger redeployments
+- name: Serially drain all nodes to trigger redeployments
hosts: oo_nodes_to_config
serial: 1
any_errors_fatal: true
@@ -222,7 +218,7 @@
was_schedulable: "{{ 'unschedulable' not in (node_output.stdout | from_json).spec }}"
when: openshift_certificates_redeploy_ca | default(false) | bool
- - name: Prepare for node evacuation
+ - name: Prepare for node draining
command: >
{{ openshift.common.client_binary }} adm --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
manage-node {{ openshift.node.nodename }}
@@ -230,11 +226,11 @@
delegate_to: "{{ groups.oo_first_master.0 }}"
when: openshift_certificates_redeploy_ca | default(false) | bool and was_schedulable | bool
- - name: Evacuate node
+ - name: Drain node
command: >
{{ openshift.common.client_binary }} adm --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig
manage-node {{ openshift.node.nodename }}
- --evacuate --force
+ {{ openshift.common.evacuate_or_drain }} --force
delegate_to: "{{ groups.oo_first_master.0 }}"
when: openshift_certificates_redeploy_ca | default(false) | bool and was_schedulable | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/restart.yml b/playbooks/common/openshift-cluster/upgrades/docker/restart.yml
index d800b289b..1b418920f 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/restart.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/restart.yml
@@ -19,11 +19,9 @@
when: openshift.common.is_containerized | bool
- name: Wait for master API to come back online
- become: no
- local_action:
- module: wait_for
- host="{{ inventory_hostname }}"
- state=started
- delay=10
- port="{{ openshift.master.api_port }}"
+ wait_for:
+ host: "{{ openshift.common.hostname }}"
+ state: started
+ delay: 10
+ port: "{{ openshift.master.api_port }}"
when: inventory_hostname in groups.oo_masters_to_config
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml
index 44ddf97ad..17f8fc6e9 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml
@@ -20,7 +20,7 @@
- debug: var=docker_image_count.stdout
- name: Remove all containers and images
- script: nuke_images.sh docker
+ script: nuke_images.sh
register: nuke_images_result
when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
index e3379f29b..b2a2eac9a 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml
@@ -30,9 +30,9 @@
changed_when: false
- fail:
- msg: This playbook requires access to Docker 1.10 or later
- # Disable the 1.10 requirement if the user set a specific Docker version
- when: docker_version is not defined and (docker_upgrade is not defined or docker_upgrade | bool == True) and (pkg_check.rc == 0 and (avail_docker_version.stdout == "" or avail_docker_version.stdout | version_compare('1.10','<')))
+ msg: This playbook requires access to Docker 1.12 or later
+ # Disable the 1.12 requirement if the user set a specific Docker version
+ when: docker_version is not defined and (docker_upgrade is not defined or docker_upgrade | bool == True) and (pkg_check.rc == 0 and (avail_docker_version.stdout == "" or avail_docker_version.stdout | version_compare('1.12','<')))
# Default l_docker_upgrade to False, we'll set to True if an upgrade is required:
- set_fact:
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
index 0a972adf6..d0eadf1fc 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml
@@ -4,6 +4,7 @@
vars:
embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
+ etcdctl_command: "{{ 'etcdctl' if not openshift.common.is_containerized or embedded_etcd else 'docker exec etcd_container etcdctl' }}"
roles:
- openshift_facts
tasks:
@@ -42,19 +43,32 @@
{{ avail_disk.stdout }} Kb available.
when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
- # TODO - Refactor containerized backup to use etcd_container to backup the data so we don't rely on
- # the host's etcdctl binary which may be of a different version.
-
- # for non containerized and non embedded we should have the correct version of etcd installed already
- # For embedded we need to use the latest because OCP 3.3 uses a version of etcd that can only be backed
- # up with etcd-3.x
+ # For non-containerized and non-embedded we should have the correct version of
+ # etcd installed already. So don't do anything.
+ #
+ # For embedded or containerized we need to use the latest because OCP 3.3 uses
+ # a version of etcd that can only be backed up with etcd-3.x, and if it's
+ # containerized then the etcd version may be newer than that on the host, so
+ # upgrade it.
+ #
+ # On atomic we have neither yum nor dnf, so Ansible throws a hard-to-debug error
+ # if you use package there, like this: "Could not find a module for unknown."
+ # see https://bugzilla.redhat.com/show_bug.cgi?id=1408668
+ #
+ # TODO - We should refactor all containerized backups to use the containerized
+ # version of etcd to perform the backup rather than relying on the host's
+ # binaries. Until we do that we'll continue to have problems backing up etcd
+ # when atomic host has an older version than the version that's running in the
+ # container, whether that's embedded or not.
- name: Install latest etcd for containerized or embedded
- package: name=etcd state=latest
- when: ( openshift.common.is_containerized and not openshift.common.is_atomic ) or embedded_etcd | bool
+ package:
+ name: etcd
+ state: latest
+ when: ( embedded_etcd | bool or openshift.common.is_containerized ) and not openshift.common.is_atomic
- name: Generate etcd backup
command: >
- etcdctl backup --data-dir={{ openshift.etcd.etcd_data_dir }}
+ {{ etcdctl_command }} backup --data-dir={{ openshift.etcd.etcd_data_dir }}
--backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ backup_tag | default('') }}{{ timestamp }}
- set_fact:
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml b/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml
index f88981a0b..5f8b59e17 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml
@@ -8,8 +8,7 @@
- name: Set new_etcd_image
set_fact:
- new_etcd_image: "{{ current_image.stdout | regex_replace('/etcd.*$','/etcd3:' ~ upgrade_version ) if upgrade_version | version_compare('3.0','>=')
- else current_image.stdout.split(':')[0] ~ ':' ~ upgrade_version }}"
+ new_etcd_image: "{{ current_image.stdout | regex_replace('/etcd.*$','/etcd:' ~ upgrade_version ) }}"
- name: Pull new etcd image
command: "docker pull {{ new_etcd_image }}"
diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
index 5ff9521ec..0f8d94737 100644
--- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml
@@ -75,7 +75,7 @@
hosts: etcd_hosts_to_upgrade
serial: 1
vars:
- upgrade_version: 3.0.14
+ upgrade_version: 3.0.15
tasks:
- include: containerized_tasks.yml
when: etcd_container_version.stdout | default('99') | version_compare('3.0','<') and openshift.common.is_containerized | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check b/playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check
deleted file mode 100644
index e5c958ebb..000000000
--- a/playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check
+++ /dev/null
@@ -1,193 +0,0 @@
-#!/usr/bin/env python
-"""
-Pre-upgrade checks that must be run on a master before proceeding with upgrade.
-"""
-# This is a script not a python module:
-# pylint: disable=invalid-name
-
-# NOTE: This script should not require any python libs other than what is
-# in the standard library.
-
-__license__ = "ASL 2.0"
-
-import json
-import os
-import subprocess
-import re
-
-# The maximum length of container.ports.name
-ALLOWED_LENGTH = 15
-# The valid structure of container.ports.name
-ALLOWED_CHARS = re.compile('^[a-z0-9][a-z0-9\\-]*[a-z0-9]$')
-AT_LEAST_ONE_LETTER = re.compile('[a-z]')
-# look at OS_PATH for the full path. Default ot 'oc'
-OC_PATH = os.getenv('OC_PATH', 'oc')
-
-
-def validate(value):
- """
- validate verifies that value matches required conventions
-
- Rules of container.ports.name validation:
-
- * must be less that 16 chars
- * at least one letter
- * only a-z0-9-
- * hyphens can not be leading or trailing or next to each other
-
- :Parameters:
- - `value`: Value to validate
- """
- if len(value) > ALLOWED_LENGTH:
- return False
-
- if '--' in value:
- return False
-
- # We search since it can be anywhere
- if not AT_LEAST_ONE_LETTER.search(value):
- return False
-
- # We match because it must start at the beginning
- if not ALLOWED_CHARS.match(value):
- return False
- return True
-
-
-def list_items(kind):
- """
- list_items returns a list of items from the api
-
- :Parameters:
- - `kind`: Kind of item to access
- """
- response = subprocess.check_output([OC_PATH, 'get', '--all-namespaces', '-o', 'json', kind])
- items = json.loads(response)
- return items.get("items", [])
-
-
-def get(obj, *paths):
- """
- Gets an object
-
- :Parameters:
- - `obj`: A dictionary structure
- - `path`: All other non-keyword arguments
- """
- ret_obj = obj
- for path in paths:
- if ret_obj.get(path, None) is None:
- return []
- ret_obj = ret_obj[path]
- return ret_obj
-
-
-# pylint: disable=too-many-arguments
-def pretty_print_errors(namespace, kind, item_name, container_name, invalid_label, port_name, valid):
- """
- Prints out results in human friendly way.
-
- :Parameters:
- - `namespace`: Namespace of the resource
- - `kind`: Kind of the resource
- - `item_name`: Name of the resource
- - `container_name`: Name of the container. May be "" when kind=Service.
- - `port_name`: Name of the port
- - `invalid_label`: The label of the invalid port. Port.name/targetPort
- - `valid`: True if the port is valid
- """
- if not valid:
- if len(container_name) > 0:
- print('%s/%s -n %s (Container="%s" %s="%s")' % (
- kind, item_name, namespace, container_name, invalid_label, port_name))
- else:
- print('%s/%s -n %s (%s="%s")' % (
- kind, item_name, namespace, invalid_label, port_name))
-
-
-def print_validation_header():
- """
- Prints the error header. Should run on the first error to avoid
- overwhelming the user.
- """
- print """\
-At least one port name is invalid and must be corrected before upgrading.
-Please update or remove any resources with invalid port names.
-
- Valid port names must:
-
- * be less that 16 characters
- * have at least one letter
- * contain only a-z0-9-
- * not start or end with -
- * not contain dashes next to each other ('--')
-"""
-
-
-def main():
- """
- main is the main entry point to this script
- """
- try:
- # the comma at the end suppresses the newline
- print "Checking for oc ...",
- subprocess.check_output([OC_PATH, 'whoami'])
- print "found"
- except:
- print(
- 'Unable to run "%s whoami"\n'
- 'Please ensure OpenShift is running, and "oc" is on your system '
- 'path.\n'
- 'You can override the path with the OC_PATH environment variable.'
- % OC_PATH)
- raise SystemExit(1)
-
- # Where the magic happens
- first_error = True
- for kind, path in [
- ('deploymentconfigs', ("spec", "template", "spec", "containers")),
- ('replicationcontrollers', ("spec", "template", "spec", "containers")),
- ('pods', ("spec", "containers"))]:
- for item in list_items(kind):
- namespace = item["metadata"]["namespace"]
- item_name = item["metadata"]["name"]
- for container in get(item, *path):
- container_name = container["name"]
- for port in get(container, "ports"):
- port_name = port.get("name", None)
- if not port_name:
- # Unnamed ports are OK
- continue
- valid = validate(port_name)
- if not valid and first_error:
- first_error = False
- print_validation_header()
- pretty_print_errors(
- namespace, kind, item_name,
- container_name, "Port.name", port_name, valid)
-
- # Services follow a different flow
- for item in list_items('services'):
- namespace = item["metadata"]["namespace"]
- item_name = item["metadata"]["name"]
- for port in get(item, "spec", "ports"):
- port_name = port.get("targetPort", None)
- if isinstance(port_name, int) or port_name is None:
- # Integer only or unnamed ports are OK
- continue
- valid = validate(port_name)
- if not valid and first_error:
- first_error = False
- print_validation_header()
- pretty_print_errors(
- namespace, "services", item_name, "",
- "targetPort", port_name, valid)
-
- # If we had at least 1 error then exit with 1
- if not first_error:
- raise SystemExit(1)
-
-
-if __name__ == '__main__':
- main()
-
diff --git a/playbooks/common/openshift-cluster/upgrades/files/rpm_versions.sh b/playbooks/common/openshift-cluster/upgrades/files/rpm_versions.sh
deleted file mode 100644
index 7bf249742..000000000
--- a/playbooks/common/openshift-cluster/upgrades/files/rpm_versions.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/bash
-if [ `which dnf 2> /dev/null` ]; then
- installed=$(dnf repoquery --installed --latest-limit 1 -d 0 --qf '%{version}-%{release}' "${@}" 2> /dev/null)
- available=$(dnf repoquery --available --latest-limit 1 -d 0 --qf '%{version}-%{release}' "${@}" 2> /dev/null)
-else
- installed=$(repoquery --plugins --pkgnarrow=installed --qf '%{version}-%{release}' "${@}" 2> /dev/null)
- available=$(repoquery --plugins --pkgnarrow=available --qf '%{version}-%{release}' "${@}" 2> /dev/null)
-fi
-
-echo "---"
-echo "curr_version: ${installed}"
-echo "avail_version: ${available}"
diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml
index 8cac2fb3b..76645ff3f 100644
--- a/playbooks/common/openshift-cluster/upgrades/init.yml
+++ b/playbooks/common/openshift-cluster/upgrades/init.yml
@@ -1,5 +1,6 @@
---
-- hosts: localhost
+- name: Create l_oo_all_hosts group
+ hosts: localhost
connection: local
become: no
gather_facts: no
@@ -10,7 +11,8 @@
groups: l_oo_all_hosts
with_items: "{{ g_all_hosts | default([]) }}"
-- hosts: l_oo_all_hosts
+- name: Include g_*_hosts vars for hosts in group l_oo_all_hosts
+ hosts: l_oo_all_hosts
gather_facts: no
tasks:
- include_vars: ../../../byo/openshift-cluster/cluster_hosts.yml
@@ -46,3 +48,14 @@
when: openshift_docker_log_options is not defined
- include: ../initialize_facts.yml
+
+- name: Ensure clean repo cache in the event repos have been changed manually
+ hosts: oo_all_hosts
+ tags:
+ - pre_upgrade
+ tasks:
+ - name: Clean package cache
+ command: "{{ ansible_pkg_mgr }} clean all"
+ when: not openshift.common.is_atomic | bool
+ args:
+ warn: no
diff --git a/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py b/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py
index 1238acb05..673f11889 100755
--- a/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py
+++ b/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py
@@ -146,7 +146,7 @@ def main():
# ignore broad-except error to avoid stack trace to ansible user
# pylint: disable=broad-except
- except Exception, e:
+ except Exception as e:
return module.fail_json(msg=str(e))
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
index ba4d77617..7646e0fa6 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
@@ -19,5 +19,5 @@
when: openshift.common.is_atomic | bool
- fail:
- msg: This playbook requires access to Docker 1.10 or later
- when: openshift.common.is_atomic | bool and l_docker_version.avail_version | default(l_docker_version.curr_version, true) | version_compare('1.10','<')
+ msg: This playbook requires access to Docker 1.12 or later
+ when: openshift.common.is_atomic | bool and l_docker_version.avail_version | default(l_docker_version.curr_version, true) | version_compare('1.12','<')
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
index 9632626a4..c83923dae 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
@@ -12,10 +12,6 @@
msg: Verify the correct version was found
when: verify_upgrade_version is defined and openshift_version != verify_upgrade_version
- - name: Clean package cache
- command: "{{ ansible_pkg_mgr }} clean all"
- when: not openshift.common.is_atomic | bool
-
- set_fact:
g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}"
when: not openshift.common.is_containerized | bool
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index 474e6311e..77b37cdc2 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -30,14 +30,6 @@
- name: Upgrade and backup etcd
include: ./etcd/main.yml
-- name: Upgrade master packages
- hosts: oo_masters_to_config
- roles:
- - openshift_facts
- tasks:
- - include: rpm_upgrade.yml component=master
- when: not openshift.common.is_containerized | bool
-
# Create service signer cert when missing. Service signer certificate
# is added to master config in the master config hook for v3_3.
- name: Determine if service signer cert must be created
@@ -51,15 +43,40 @@
- include: create_service_signer_cert.yml
-- name: Upgrade master config and systemd units
+# Set openshift_master_facts separately. In order to reconcile
+# admission_config settings, we currently must run openshift_master_facts and
+# then run openshift_facts.
+- name: Set OpenShift master facts
+ hosts: oo_masters_to_config
+ roles:
+ - openshift_master_facts
+
+# The main master upgrade play. Should handle all changes to the system in one pass, with
+# support for optional hooks to be defined.
+- name: Upgrade master
hosts: oo_masters_to_config
+ vars:
+ openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
+ serial: 1
handlers:
- include: ../../../../roles/openshift_master/handlers/main.yml
static: yes
roles:
- openshift_facts
- - openshift_master_facts
- tasks:
+ post_tasks:
+
+ # Run the pre-upgrade hook if defined:
+ - debug: msg="Running master pre-upgrade hook {{ openshift_master_upgrade_pre_hook }}"
+ when: openshift_master_upgrade_pre_hook is defined
+
+ - include: "{{ openshift_master_upgrade_pre_hook }}"
+ when: openshift_master_upgrade_pre_hook is defined
+
+ - include: rpm_upgrade.yml component=master
+ when: not openshift.common.is_containerized | bool
+
+ - include_vars: ../../../../roles/openshift_master_facts/vars/main.yml
+
- include: upgrade_scheduler.yml
- include: "{{ master_config_hook }}"
@@ -95,9 +112,26 @@
state: link
when: ca_crt_stat.stat.isreg and not ca_bundle_stat.stat.exists
-- name: Set master update status to complete
- hosts: oo_masters_to_config
- tasks:
+ # Run the upgrade hook prior to restarting services/system if defined:
+ - debug: msg="Running master upgrade hook {{ openshift_master_upgrade_hook }}"
+ when: openshift_master_upgrade_hook is defined
+
+ - include: "{{ openshift_master_upgrade_hook }}"
+ when: openshift_master_upgrade_hook is defined
+
+ - include: ../../openshift-master/restart_hosts.yml
+ when: openshift.common.rolling_restart_mode == 'system'
+
+ - include: ../../openshift-master/restart_services.yml
+ when: openshift.common.rolling_restart_mode == 'services'
+
+ # Run the post-upgrade hook if defined:
+ - debug: msg="Running master post-upgrade hook {{ openshift_master_upgrade_post_hook }}"
+ when: openshift_master_upgrade_post_hook is defined
+
+ - include: "{{ openshift_master_upgrade_post_hook }}"
+ when: openshift_master_upgrade_post_hook is defined
+
- set_fact:
master_update_complete: True
@@ -119,10 +153,6 @@
msg: "Upgrade cannot continue. The following masters did not finish updating: {{ master_update_failed | join(',') }}"
when: master_update_failed | length > 0
-# We are now ready to restart master services (or entire system
-# depending on openshift_rolling_restart_mode):
-- include: ../../openshift-master/restart.yml
-
###############################################################################
# Reconcile Cluster Roles, Cluster Role Bindings and Security Context Constraints
###############################################################################
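
The upgrade hooks introduced above are consumed with plain `include:` statements, so a hook is simply an Ansible task file whose path is supplied through the inventory. A hedged sketch, with a hypothetical path and contents (only the variable names come from the diff):

```yaml
# Hypothetical hook wiring. In the inventory or group vars, point the
# hook variable at a task file, e.g.:
#   openshift_master_upgrade_pre_hook: /usr/share/custom/pre_master.yml
#
# The referenced file is an ordinary task list, executed on each master
# (serial: 1) just before that master's packages/images are upgraded:
---
- name: Announce the upcoming master upgrade
  debug:
    msg: "About to upgrade master {{ inventory_hostname }}"

- name: Give external monitoring a moment to acknowledge the outage
  pause:
    seconds: 5
```

The same shape applies to `openshift_master_upgrade_hook` (runs after the package upgrade but before services are restarted) and `openshift_master_upgrade_post_hook` (runs after the restart), matching the order of the includes above.
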
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index b3ac34d90..2bb460815 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -1,5 +1,5 @@
---
-- name: Evacuate and upgrade nodes
+- name: Drain and upgrade nodes
hosts: oo_nodes_to_upgrade
# This var must be set with -e on invocation, as it is not a per-host inventory var
# and is evaluated early. Values such as "20%" can also be used.
@@ -39,9 +39,9 @@
retries: 3
delay: 1
- - name: Evacuate Node for Kubelet upgrade
+ - name: Drain Node for Kubelet upgrade
command: >
- {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --evacuate --force
+ {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} {{ openshift.common.evacuate_or_drain }} --force
delegate_to: "{{ groups.oo_first_master.0 }}"
when: inventory_hostname in groups.oo_nodes_to_upgrade
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 8058d3377..7a334e771 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -74,22 +74,6 @@
public_console_url: "{{ openshift_master_public_console_url | default(None) }}"
ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}"
master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}"
- - openshift_facts:
- role: hosted
- openshift_env:
- openshift_hosted_registry_storage_kind: 'nfs'
- when: openshift_hosted_registry_storage_kind is not defined and groups.oo_nfs_to_config is defined and groups.oo_nfs_to_config | length > 0
-
-- name: Create temp directory for syncing certs
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - name: Create local temp directory for syncing certs
- local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
- register: g_master_mktemp
- changed_when: False
- name: Determine if session secrets must be generated
hosts: oo_first_master
@@ -122,7 +106,6 @@
hosts: oo_masters_to_config
any_errors_fatal: true
vars:
- sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
openshift_master_ha: "{{ openshift.master.ha }}"
openshift_master_count: "{{ openshift.master.master_count }}"
openshift_master_session_auth_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_auth_secrets }}"
@@ -133,57 +116,19 @@
| oo_collect('openshift.common.hostname') | default([]) | join (',')
}}"
roles:
- - role: openshift_master_facts
- - role: openshift_hosted_facts
- - role: openshift_master_certificates
+ - role: openshift_master
openshift_ca_host: "{{ groups.oo_first_master.0 }}"
openshift_master_etcd_hosts: "{{ hostvars
| oo_select_keys(groups['oo_etcd_to_config'] | default([]))
| oo_collect('openshift.common.hostname')
| default(none, true) }}"
- openshift_master_hostnames: "{{ hostvars
- | oo_select_keys(groups['oo_masters_to_config'] | default([]))
- | oo_collect('openshift.common.all_hostnames')
- | oo_flatten | unique }}"
- - role: openshift_etcd_client_certificates
+ openshift_master_hosts: "{{ groups.oo_masters_to_config }}"
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}"
etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
etcd_cert_prefix: "master.etcd-"
- when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config
- - role: openshift_clock
- - role: openshift_cloud_provider
- - role: openshift_builddefaults
- - role: os_firewall
- os_firewall_allow:
- - service: api server https
- port: "{{ openshift.master.api_port }}/tcp"
- - service: api controllers https
- port: "{{ openshift.master.controllers_port }}/tcp"
- - service: skydns tcp
- port: "{{ openshift.master.dns_port }}/tcp"
- - service: skydns udp
- port: "{{ openshift.master.dns_port }}/udp"
- - role: os_firewall
- os_firewall_allow:
- - service: etcd embedded
- port: 4001/tcp
- when: groups.oo_etcd_to_config | default([]) | length == 0
- - role: openshift_master
- openshift_master_hosts: "{{ groups.oo_masters_to_config }}"
- - role: nickhammond.logrotate
- - role: nuage_master
- when: openshift.common.use_nuage | bool
+
post_tasks:
- name: Create group for deployment type
group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }}
changed_when: False
-
-- name: Delete temporary directory on localhost
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tasks:
- - file: name={{ g_master_mktemp.stdout }} state=absent
- changed_when: False
diff --git a/playbooks/common/openshift-master/restart_hosts.yml b/playbooks/common/openshift-master/restart_hosts.yml
index ffa23d26a..832301e3d 100644
--- a/playbooks/common/openshift-master/restart_hosts.yml
+++ b/playbooks/common/openshift-master/restart_hosts.yml
@@ -7,12 +7,19 @@
ignore_errors: true
become: yes
-# Ensure the api_port is available.
-- name: Wait for master API to come back online
- become: no
+- name: Wait for master to restart
local_action:
module: wait_for
- host="{{ openshift.common.hostname }}"
+ host="{{ inventory_hostname }}"
state=started
delay=10
- port="{{ openshift.master.api_port }}"
+ become: no
+
+# Now that ssh is back up we can wait for API on the remote system,
+# avoiding some potential connection issues from the local system:
+- name: Wait for master API to come back online
+ wait_for:
+ host: "{{ openshift.common.hostname }}"
+ state: started
+ delay: 10
+ port: "{{ openshift.master.api_port }}"
diff --git a/playbooks/common/openshift-master/restart_services.yml b/playbooks/common/openshift-master/restart_services.yml
index 25fa10450..508b5a3ac 100644
--- a/playbooks/common/openshift-master/restart_services.yml
+++ b/playbooks/common/openshift-master/restart_services.yml
@@ -8,16 +8,14 @@
service:
name: "{{ openshift.common.service_type }}-master-api"
state: restarted
- when: openshift_master_ha | bool and openshift.master.cluster_method != 'pacemaker'
+ when: openshift_master_ha | bool
- name: Wait for master API to come back online
- become: no
- local_action:
- module: wait_for
- host="{{ openshift.common.hostname }}"
- state=started
- delay=10
- port="{{ openshift.master.api_port }}"
- when: openshift_master_ha | bool and openshift.master.cluster_method != 'pacemaker'
+ wait_for:
+ host: "{{ openshift.common.hostname }}"
+ state: started
+ delay: 10
+ port: "{{ openshift.master.api_port }}"
+ when: openshift_master_ha | bool
- name: Restart master controllers
service:
name: "{{ openshift.common.service_type }}-master-controllers"
@@ -25,4 +23,4 @@
# Ignore errors since it is possible that type != simple for
# pre-3.1.1 installations.
ignore_errors: true
- when: openshift_master_ha | bool and openshift.master.cluster_method != 'pacemaker'
+ when: openshift_master_ha | bool
diff --git a/playbooks/common/openshift-master/restart.yml b/playbooks/common/openshift-master/validate_restart.yml
index 7b340887a..5dbb21502 100644
--- a/playbooks/common/openshift-master/restart.yml
+++ b/playbooks/common/openshift-master/validate_restart.yml
@@ -1,6 +1,4 @@
---
-- include: ../openshift-cluster/evaluate_groups.yml
-
- name: Validate configuration for rolling restart
hosts: oo_masters_to_config
roles:
@@ -65,14 +63,3 @@
- set_fact:
current_host: "{{ exists.stat.exists }}"
when: openshift.common.rolling_restart_mode == 'system'
-
-- name: Restart masters
- hosts: oo_masters_to_config
- vars:
- openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
- serial: 1
- tasks:
- - include: restart_hosts.yml
- when: openshift.common.rolling_restart_mode == 'system'
- - include: restart_services.yml
- when: openshift.common.rolling_restart_mode == 'services'
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
index e28da5713..b36c0eedf 100644
--- a/playbooks/common/openshift-node/config.yml
+++ b/playbooks/common/openshift-node/config.yml
@@ -60,30 +60,8 @@
when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
openshift_generate_no_proxy_hosts | default(True) | bool }}"
roles:
- - role: openshift_common
- - role: openshift_clock
- - role: openshift_docker
- - role: openshift_node_certificates
- openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- - role: openshift_cloud_provider
- - role: openshift_node_dnsmasq
- when: openshift.common.use_dnsmasq | bool
- - role: os_firewall
- os_firewall_allow:
- - service: Kubernetes kubelet
- port: 10250/tcp
- - service: http
- port: 80/tcp
- - service: https
- port: 443/tcp
- - service: Openshift kubelet ReadOnlyPort
- port: 10255/tcp
- - service: Openshift kubelet ReadOnlyPort udp
- port: 10255/udp
- - service: OpenShift OVS sdn
- port: 4789/udp
- when: openshift.node.use_openshift_sdn | bool
- role: openshift_node
+ openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- name: Configure nodes
hosts: oo_nodes_to_config:!oo_containerized_master_nodes
@@ -99,30 +77,8 @@
when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
openshift_generate_no_proxy_hosts | default(True) | bool }}"
roles:
- - role: openshift_common
- - role: openshift_clock
- - role: openshift_docker
- - role: openshift_node_certificates
- openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- - role: openshift_cloud_provider
- - role: openshift_node_dnsmasq
- when: openshift.common.use_dnsmasq | bool
- - role: os_firewall
- os_firewall_allow:
- - service: Kubernetes kubelet
- port: 10250/tcp
- - service: http
- port: 80/tcp
- - service: https
- port: 443/tcp
- - service: Openshift kubelet ReadOnlyPort
- port: 10255/tcp
- - service: Openshift kubelet ReadOnlyPort udp
- port: 10255/udp
- - service: OpenShift OVS sdn
- port: 4789/udp
- when: openshift.node.use_openshift_sdn | bool
- role: openshift_node
+ openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- name: Additional node config
hosts: oo_nodes_to_config
diff --git a/playbooks/gce/README.md b/playbooks/gce/README.md
new file mode 100644
index 000000000..0514d6f50
--- /dev/null
+++ b/playbooks/gce/README.md
@@ -0,0 +1,4 @@
+# GCE playbooks
+
+This playbook directory is meant to be driven by [`bin/cluster`](../../bin),
+which is community supported; most use of it is considered deprecated.
diff --git a/playbooks/libvirt/README.md b/playbooks/libvirt/README.md
new file mode 100644
index 000000000..3ce46a76f
--- /dev/null
+++ b/playbooks/libvirt/README.md
@@ -0,0 +1,4 @@
+# libvirt playbooks
+
+This playbook directory is meant to be driven by [`bin/cluster`](../../bin),
+which is community supported; most use of it is considered deprecated.
diff --git a/playbooks/openstack/README.md b/playbooks/openstack/README.md
new file mode 100644
index 000000000..a6d8d6995
--- /dev/null
+++ b/playbooks/openstack/README.md
@@ -0,0 +1,4 @@
+# OpenStack playbooks
+
+This playbook directory is meant to be driven by [`bin/cluster`](../../bin),
+which is community supported; most use of it is considered deprecated.
diff --git a/requirements.txt b/requirements.txt
index e55ef5f0b..8f47033f8 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,2 +1,4 @@
-ansible>=2.1
+ansible>=2.2
+six
pyOpenSSL
+PyYAML
diff --git a/roles/docker/meta/main.yml b/roles/docker/meta/main.yml
index dadd62c93..ad28cece9 100644
--- a/roles/docker/meta/main.yml
+++ b/roles/docker/meta/main.yml
@@ -11,4 +11,3 @@ galaxy_info:
- 7
dependencies:
- role: os_firewall
- os_firewall_use_firewalld: False
diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml
index a93bdc2ad..66c9cfa0f 100644
--- a/roles/docker/tasks/main.yml
+++ b/roles/docker/tasks/main.yml
@@ -15,14 +15,6 @@
msg: "Docker {{ docker_version }} requested, but >= 1.9.1 is required."
when: docker_version is defined and docker_version | version_compare('1.9.1', '<')
-- name: Get latest available version of Docker
- command: >
- {{ repoquery_cmd }} --qf '%{version}' "docker"
- register: avail_docker_version
- failed_when: false
- changed_when: false
- when: docker_version is defined and not openshift.common.is_atomic | bool
-
# If a docker_version was requested, sanity check that we can install or upgrade to it, and
# no downgrade is required.
- name: Fail if Docker version requested but downgrade is required
@@ -43,16 +35,18 @@
package: name=docker{{ '-' + docker_version if docker_version is defined else '' }} state=present
when: not openshift.common.is_atomic | bool
-- name: Ensure docker.service.d directory exists
- file:
- path: "{{ docker_systemd_dir }}"
- state: directory
+- block:
+ # Extend the default Docker service unit file when using iptables-services
+ - name: Ensure docker.service.d directory exists
+ file:
+ path: "{{ docker_systemd_dir }}"
+ state: directory
-# Extend the default Docker service unit file
-- name: Configure Docker service unit file
- template:
- dest: "{{ docker_systemd_dir }}/custom.conf"
- src: custom.conf.j2
+ - name: Configure Docker service unit file
+ template:
+ dest: "{{ docker_systemd_dir }}/custom.conf"
+ src: custom.conf.j2
+ when: not os_firewall_use_firewalld | default(True) | bool
- include: udev_workaround.yml
when: docker_udev_workaround | default(False) | bool
@@ -102,7 +96,7 @@
dest: /etc/sysconfig/docker
regexp: '^OPTIONS=.*$'
line: "OPTIONS='\
- {% if ansible_selinux and ansible_selinux.status == '''enabled''' %} --selinux-enabled{% endif %}\
+ {% if ansible_selinux.status | default(None) == '''enabled''' and docker_selinux_enabled | default(true) %} --selinux-enabled {% endif %}\
{% if docker_log_driver is defined %} --log-driver {{ docker_log_driver }}{% endif %}\
{% if docker_log_options is defined %} {{ docker_log_options | oo_split() | oo_prepend_strings_in_list('--log-opt ') | join(' ')}}{% endif %}\
{% if docker_options is defined %} {{ docker_options }}{% endif %}\
diff --git a/roles/kube_nfs_volumes/library/partitionpool.py b/roles/kube_nfs_volumes/library/partitionpool.py
index 2cd454274..1857433c7 100644
--- a/roles/kube_nfs_volumes/library/partitionpool.py
+++ b/roles/kube_nfs_volumes/library/partitionpool.py
@@ -3,6 +3,8 @@
Ansible module for partitioning.
"""
+from __future__ import print_function
+
# There is no pyparted on our Jenkins worker
# pylint: disable=import-error
import parted
@@ -131,7 +133,7 @@ def partition(diskname, specs, force=False, check_mode=False):
disk = None
if disk and len(disk.partitions) > 0 and not force:
- print "skipping", diskname
+ print("skipping", diskname)
return 0
# create new partition table, wiping all existing data
@@ -220,7 +222,7 @@ def main():
try:
specs = parse_spec(sizes)
- except ValueError, ex:
+ except ValueError as ex:
err = "Error parsing sizes=" + sizes + ": " + str(ex)
module.fail_json(msg=err)
@@ -229,7 +231,7 @@ def main():
for disk in disks.split(","):
try:
changed_count += partition(disk, specs, force, module.check_mode)
- except Exception, ex:
+ except Exception as ex:
err = "Error creating partitions on " + disk + ": " + str(ex)
raise
# module.fail_json(msg=err)
diff --git a/roles/lib_openshift/library/oc_edit.py b/roles/lib_openshift/library/oc_edit.py
new file mode 100644
index 000000000..1a361ae20
--- /dev/null
+++ b/roles/lib_openshift/library/oc_edit.py
@@ -0,0 +1,1377 @@
+#!/usr/bin/env python
+# pylint: disable=missing-docstring
+# flake8: noqa: T001
+# ___ ___ _ _ ___ ___ _ _____ ___ ___
+# / __| __| \| | __| _ \ /_\_ _| __| \
+# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
+# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
+# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
+# | |) | (_) | | .` | (_) || | | _|| |) | | | |
+# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
+#
+# Copyright 2016 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+'''
+ OpenShiftCLI class that wraps the oc commands in a subprocess
+'''
+# pylint: disable=too-many-lines
+
+from __future__ import print_function
+import atexit
+import json
+import os
+import re
+import shutil
+import subprocess
+# pylint: disable=import-error
+import ruamel.yaml as yaml
+from ansible.module_utils.basic import AnsibleModule
+
+DOCUMENTATION = '''
+---
+module: oc_edit
+short_description: Modify and idempotently manage openshift objects.
+description:
+ - Modify openshift objects programmatically.
+options:
+ state:
+ description:
+ - Currently present is only supported state.
+ required: true
+ default: present
+ choices: ["present"]
+ aliases: []
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+ name:
+ description:
+ - Name of the object that is being queried.
+ required: false
+ default: None
+ aliases: []
+ namespace:
+ description:
+ - The namespace where the object lives.
+ required: false
+    default: default
+ aliases: []
+ kind:
+ description:
+ - The kind attribute of the object.
+ required: True
+ default: None
+ choices:
+ - bc
+ - buildconfig
+ - configmaps
+ - dc
+ - deploymentconfig
+ - imagestream
+ - imagestreamtag
+ - is
+ - istag
+ - namespace
+ - project
+ - projects
+ - node
+ - ns
+ - persistentvolume
+ - pv
+ - rc
+ - replicationcontroller
+ - routes
+ - scc
+ - secret
+ - securitycontextconstraints
+ - service
+ - svc
+ aliases: []
+ file_name:
+ description:
+    - The name of the file to edit
+ required: false
+ default: None
+ aliases: []
+ file_format:
+ description:
+ - The format of the file being edited.
+ required: false
+ default: yaml
+ aliases: []
+ content:
+ description:
+    - The key/value pairs of content to apply to the object
+ required: false
+ default: None
+ aliases: []
+ force:
+ description:
+ - Whether or not to force the operation
+ required: false
+ default: None
+ aliases: []
+ separator:
+ description:
+ - The separator format for the edit.
+ required: false
+ default: '.'
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+oc_edit:
+ kind: rc
+ name: hawkular-cassandra-rc
+ namespace: openshift-infra
+ content:
+ spec.template.spec.containers[0].resources.limits.memory: 512
+ spec.template.spec.containers[0].resources.requests.memory: 256
+'''
+# noqa: E301,E302
+
+
+class YeditException(Exception):
+ ''' Exception class for Yedit '''
+ pass
+
+
+# pylint: disable=too-many-public-methods
+class Yedit(object):
+ ''' Class to modify yaml files '''
+ re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ com_sep = set(['.', '#', '|', ':'])
+
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ filename=None,
+ content=None,
+ content_type='yaml',
+ separator='.',
+ backup=False):
+ self.content = content
+ self._separator = separator
+ self.filename = filename
+ self.__yaml_dict = content
+ self.content_type = content_type
+ self.backup = backup
+ self.load(content_type=self.content_type)
+ if self.__yaml_dict is None:
+ self.__yaml_dict = {}
+
+    @property
+    def separator(self):
+        ''' getter method for separator '''
+        return self._separator
+
+    @separator.setter
+    def separator(self, value):
+        ''' setter method for separator '''
+        self._separator = value
+
+ @property
+ def yaml_dict(self):
+ ''' getter method for yaml_dict '''
+ return self.__yaml_dict
+
+ @yaml_dict.setter
+ def yaml_dict(self, value):
+ ''' setter method for yaml_dict '''
+ self.__yaml_dict = value
+
+ @staticmethod
+ def parse_key(key, sep='.'):
+ '''parse the key allowing the appropriate separator'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ return re.findall(Yedit.re_key % ''.join(common_separators), key)
+
+ @staticmethod
+ def valid_key(key, sep='.'):
+ '''validate the incoming key'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
+ return False
+
+ return True
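+    # A quick illustration of the two helpers above (the path below is
+    # hypothetical): with the default separator '.',
+    #   Yedit.parse_key('a.b[0].c') should yield
+    #   [('', 'a'), ('', 'b'), ('0', ''), ('', 'c')]
+    # (one (array_index, dict_key) tuple per path element), and
+    #   Yedit.valid_key('a.b[0].c') should return True.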
+
+ @staticmethod
+ def remove_entry(data, key, sep='.'):
+ ''' remove data at location key '''
+ if key == '' and isinstance(data, dict):
+ data.clear()
+ return True
+ elif key == '' and isinstance(data, list):
+ del data[:]
+ return True
+
+ if not (key and Yedit.valid_key(key, sep)) and \
+ isinstance(data, (list, dict)):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ # process last index for remove
+ # expected list entry
+ if key_indexes[-1][0]:
+ if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ del data[int(key_indexes[-1][0])]
+ return True
+
+ # expected dict entry
+ elif key_indexes[-1][1]:
+ if isinstance(data, dict):
+ del data[key_indexes[-1][1]]
+ return True
+
+ @staticmethod
+ def add_entry(data, key, item=None, sep='.'):
+        ''' Set an item in a dictionary with key notation a.b.c
+            d = {'a': {'b': 'c'}}
+            add_entry(d, 'a.b', 'x') sets d['a']['b'] = 'x'
+        '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key:
+ if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
+ data = data[dict_key]
+ continue
+
+ elif data and not isinstance(data, dict):
+ return None
+
+ data[dict_key] = {}
+ data = data[dict_key]
+
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ if key == '':
+ data = item
+
+ # process last index for add
+ # expected list entry
+ elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ data[int(key_indexes[-1][0])] = item
+
+ # expected dict entry
+ elif key_indexes[-1][1] and isinstance(data, dict):
+ data[key_indexes[-1][1]] = item
+
+ return data
+
+ @staticmethod
+ def get_entry(data, key, sep='.'):
+ ''' Get an item from a dictionary with key notation a.b.c
+            d = {'a': {'b': 'c'}}
+ key = a.b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ return data
+
+ def write(self):
+ ''' write to file '''
+ if not self.filename:
+ raise YeditException('Please specify a filename.')
+
+ if self.backup and self.file_exists():
+ shutil.copy(self.filename, self.filename + '.orig')
+
+ tmp_filename = self.filename + '.yedit'
+ with open(tmp_filename, 'w') as yfd:
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ self.yaml_dict.fa.set_block_style()
+
+ yfd.write(yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+
+ os.rename(tmp_filename, self.filename)
+
+ return (True, self.yaml_dict)
+
+ def read(self):
+ ''' read from file '''
+ # check if it exists
+ if self.filename is None or not self.file_exists():
+ return None
+
+ contents = None
+ with open(self.filename) as yfd:
+ contents = yfd.read()
+
+ return contents
+
+ def file_exists(self):
+ ''' return whether file exists '''
+ if os.path.exists(self.filename):
+ return True
+
+ return False
+
+ def load(self, content_type='yaml'):
+ ''' return yaml file '''
+ contents = self.read()
+
+ if not contents and not self.content:
+ return None
+
+ if self.content:
+ if isinstance(self.content, dict):
+ self.yaml_dict = self.content
+ return self.yaml_dict
+ elif isinstance(self.content, str):
+ contents = self.content
+
+ # check if it is yaml
+ try:
+ if content_type == 'yaml' and contents:
+ self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ self.yaml_dict.fa.set_block_style()
+ elif content_type == 'json' and contents:
+ self.yaml_dict = json.loads(contents)
+ except yaml.YAMLError as err:
+ # Error loading yaml or json
+ raise YeditException('Problem with loading yaml file. %s' % err)
+
+ return self.yaml_dict
+
+ def get(self, key):
+ ''' get a specified key'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
+ except KeyError:
+ entry = None
+
+ return entry
+
+ def pop(self, path, key_or_item):
+ ''' remove a key, value pair from a dict or an item for a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ if isinstance(entry, dict):
+ # pylint: disable=no-member,maybe-no-member
+ if key_or_item in entry:
+ entry.pop(key_or_item)
+ return (True, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # pylint: disable=no-member,maybe-no-member
+ ind = None
+ try:
+ ind = entry.index(key_or_item)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ entry.pop(ind)
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ def delete(self, path):
+ ''' remove path from a dict'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ return (True, self.yaml_dict)
+
+ def exists(self, path, value):
+ ''' check if value exists at path'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, list):
+ if value in entry:
+ return True
+ return False
+
+ elif isinstance(entry, dict):
+ if isinstance(value, dict):
+ rval = False
+ for key, val in value.items():
+ if entry[key] != val:
+ rval = False
+ break
+ else:
+ rval = True
+ return rval
+
+ return value in entry
+
+ return entry == value
+
+ def append(self, path, value):
+ '''append value to a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ self.put(path, [])
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ if not isinstance(entry, list):
+ return (False, self.yaml_dict)
+
+ # pylint: disable=no-member,maybe-no-member
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # pylint: disable=too-many-arguments
+ def update(self, path, value, index=None, curr_value=None):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, dict):
+ # pylint: disable=no-member,maybe-no-member
+ if not isinstance(value, dict):
+ raise YeditException('Cannot replace key, value entry in ' +
+ 'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
+
+ entry.update(value)
+ return (True, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # pylint: disable=no-member,maybe-no-member
+ ind = None
+ if curr_value:
+ try:
+ ind = entry.index(curr_value)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ elif index is not None:
+ ind = index
+
+ if ind is not None and entry[ind] != value:
+ entry[ind] = value
+ return (True, self.yaml_dict)
+
+ # see if it exists in the list
+ try:
+ ind = entry.index(value)
+ except ValueError:
+ # doesn't exist, append it
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # already exists, return
+ if ind is not None:
+ return (False, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ def put(self, path, value):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry == value:
+ return (False, self.yaml_dict)
+
+ # deepcopy didn't work
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ tmp_copy.fa.set_block_style()
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ self.yaml_dict = tmp_copy
+
+ return (True, self.yaml_dict)
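+    # Illustration: on an empty Yedit, put('a.b.c', 1) should create the
+    # intermediate dicts on a round-tripped copy and return
+    # (True, {'a': {'b': {'c': 1}}}); yaml_dict is only swapped in when
+    # add_entry() reports a change.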
+
+ def create(self, path, value):
+ ''' create a yaml file '''
+ if not self.file_exists():
+ # deepcopy didn't work
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), # noqa: E501
+ yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ tmp_copy.fa.set_block_style()
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if result:
+ self.yaml_dict = tmp_copy
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ @staticmethod
+ def get_curr_value(invalue, val_type):
+ '''return the current value'''
+ if invalue is None:
+ return None
+
+ curr_value = invalue
+ if val_type == 'yaml':
+ curr_value = yaml.load(invalue)
+ elif val_type == 'json':
+ curr_value = json.loads(invalue)
+
+ return curr_value
+
+ @staticmethod
+ def parse_value(inc_value, vtype=''):
+ '''determine value type passed'''
+ true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
+ 'on', 'On', 'ON', ]
+ false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
+ 'off', 'Off', 'OFF']
+
+ # It came in as a string but you didn't specify value_type as string
+ # we will convert to bool if it matches any of the above cases
+ if isinstance(inc_value, str) and 'bool' in vtype:
+ if inc_value not in true_bools and inc_value not in false_bools:
+ raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
+ % (inc_value, vtype))
+ elif isinstance(inc_value, bool) and 'str' in vtype:
+ inc_value = str(inc_value)
+
+ # If vtype is not str then go ahead and attempt to yaml load it.
+ if isinstance(inc_value, str) and 'str' not in vtype:
+ try:
+ inc_value = yaml.load(inc_value)
+ except Exception:
+ raise YeditException('Could not determine type of incoming ' +
+ 'value. value=[%s] vtype=[%s]'
+ % (type(inc_value), vtype))
+
+ return inc_value
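+    # Illustration: parse_value('True', vtype='bool') passes the boolean
+    # check above and should come back as True via yaml.load(), while
+    # parse_value('True', vtype='str') should be returned as the plain
+    # string 'True'.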
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(module):
+ '''perform the idempotent crud operations'''
+ yamlfile = Yedit(filename=module.params['src'],
+ backup=module.params['backup'],
+ separator=module.params['separator'])
+
+ if module.params['src']:
+ rval = yamlfile.load()
+
+ if yamlfile.yaml_dict is None and \
+ module.params['state'] != 'present':
+ return {'failed': True,
+ 'msg': 'Error opening file [%s]. Verify that the ' +
+                               'file exists, that it has correct' +
+ ' permissions, and is valid yaml.'}
+
+ if module.params['state'] == 'list':
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['key']:
+ rval = yamlfile.get(module.params['key']) or {}
+
+ return {'changed': False, 'result': rval, 'state': "list"}
+
+ elif module.params['state'] == 'absent':
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['update']:
+ rval = yamlfile.pop(module.params['key'],
+ module.params['value'])
+ else:
+ rval = yamlfile.delete(module.params['key'])
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
+
+ elif module.params['state'] == 'present':
+ # check if content is different than what is in the file
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+
+ # We had no edits to make and the contents are the same
+ if yamlfile.yaml_dict == content and \
+ module.params['value'] is None:
+ return {'changed': False,
+ 'result': yamlfile.yaml_dict,
+ 'state': "present"}
+
+ yamlfile.yaml_dict = content
+
+ # we were passed a value; parse it
+ if module.params['value']:
+ value = Yedit.parse_value(module.params['value'],
+ module.params['value_type'])
+ key = module.params['key']
+ if module.params['update']:
+ # pylint: disable=line-too-long
+ curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']), # noqa: E501
+ module.params['curr_value_format']) # noqa: E501
+
+ rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
+
+ elif module.params['append']:
+ rval = yamlfile.append(key, value)
+ else:
+ rval = yamlfile.put(key, value)
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0],
+ 'result': rval[1], 'state': "present"}
+
+ # no edits to make
+ if module.params['src']:
+ # pylint: disable=redefined-variable-type
+ rval = yamlfile.write()
+ return {'changed': rval[0],
+ 'result': rval[1],
+ 'state': "present"}
+
+        return {'failed': True, 'msg': 'Unknown state passed'}
+# pylint: disable=too-many-lines
+# noqa: E301,E302,E303,T001
+
+
+class OpenShiftCLIError(Exception):
+ '''Exception class for openshiftcli'''
+ pass
+
+
+# pylint: disable=too-few-public-methods
+class OpenShiftCLI(object):
+ ''' Class to wrap the command line tools '''
+ def __init__(self,
+ namespace,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False,
+ all_namespaces=False):
+ ''' Constructor for OpenshiftCLI '''
+ self.namespace = namespace
+ self.verbose = verbose
+ self.kubeconfig = kubeconfig
+ self.all_namespaces = all_namespaces
+
+ # Pylint allows only 5 arguments to be passed.
+ # pylint: disable=too-many-arguments
+ def _replace_content(self, resource, rname, content, force=False, sep='.'):
+ ''' replace the current object with the content '''
+ res = self._get(resource, rname)
+ if not res['results']:
+ return res
+
+ fname = '/tmp/%s' % rname
+ yed = Yedit(fname, res['results'][0], separator=sep)
+ changes = []
+ for key, value in content.items():
+ changes.append(yed.put(key, value))
+
+ if any([change[0] for change in changes]):
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._replace(fname, force)
+
+ return {'returncode': 0, 'updated': False}
+
+ def _replace(self, fname, force=False):
+ '''replace the current object with oc replace'''
+ cmd = ['replace', '-f', fname]
+ if force:
+ cmd.append('--force')
+ return self.openshift_cmd(cmd)
+
+ def _create_from_content(self, rname, content):
+ '''create a temporary file and then call oc create on it'''
+ fname = '/tmp/%s' % rname
+ yed = Yedit(fname, content=content)
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._create(fname)
+
+ def _create(self, fname):
+ '''call oc create on a filename'''
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _delete(self, resource, rname, selector=None):
+ '''call oc delete on a resource'''
+ cmd = ['delete', resource, rname]
+ if selector:
+ cmd.append('--selector=%s' % selector)
+
+ return self.openshift_cmd(cmd)
+
+ def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
+ '''process a template
+
+ template_name: the name of the template to process
+ create: whether to send to oc create after processing
+ params: the parameters for the template
+ template_data: the incoming template's data; instead of a file
+ '''
+ cmd = ['process']
+ if template_data:
+ cmd.extend(['-f', '-'])
+ else:
+ cmd.append(template_name)
+ if params:
+ param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ cmd.append('-v')
+ cmd.extend(param_str)
+
+ results = self.openshift_cmd(cmd, output=True, input_data=template_data)
+
+ if results['returncode'] != 0 or not create:
+ return results
+
+ fname = '/tmp/%s' % template_name
+ yed = Yedit(fname, results['results'])
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self.openshift_cmd(['create', '-f', fname])
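+    # Illustration (template and parameter names are hypothetical):
+    # _process('router-template', params={'IMAGE': 'haproxy-router'})
+    # should run roughly `oc process router-template -v IMAGE=haproxy-router`,
+    # plus the namespace flags that openshift_cmd() adds.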
+
+ def _get(self, resource, rname=None, selector=None):
+ '''return a resource by name '''
+ cmd = ['get', resource]
+ if selector:
+ cmd.append('--selector=%s' % selector)
+
+ cmd.extend(['-o', 'json'])
+
+ if rname:
+ cmd.append(rname)
+
+ rval = self.openshift_cmd(cmd, output=True)
+
+        # Ensure results are returned in an array
+ if 'items' in rval:
+ rval['results'] = rval['items']
+ elif not isinstance(rval['results'], list):
+ rval['results'] = [rval['results']]
+
+ return rval
+
+ def _schedulable(self, node=None, selector=None, schedulable=True):
+        ''' perform oadm manage-node schedulable '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ cmd.append('--schedulable=%s' % schedulable)
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
+
+ def _list_pods(self, node=None, selector=None, pod_selector=None):
+ ''' perform oadm list pods
+
+ node: the node in which to list pods
+ selector: the label selector filter if provided
+ pod_selector: the pod selector filter if provided
+ '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ if pod_selector:
+ cmd.append('--pod-selector=%s' % pod_selector)
+
+ cmd.extend(['--list-pods', '-o', 'json'])
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ # pylint: disable=too-many-arguments
+ def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
+ ''' perform oadm manage-node evacuate '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ if dry_run:
+ cmd.append('--dry-run')
+
+ if pod_selector:
+ cmd.append('--pod-selector=%s' % pod_selector)
+
+ if grace_period:
+ cmd.append('--grace-period=%s' % int(grace_period))
+
+ if force:
+ cmd.append('--force')
+
+ cmd.append('--evacuate')
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ def _version(self):
+ ''' return the openshift version'''
+ return self.openshift_cmd(['version'], output=True, output_type='raw')
+
+ def _import_image(self, url=None, name=None, tag=None):
+ ''' perform image import '''
+ cmd = ['import-image']
+
+ image = '{0}'.format(name)
+ if tag:
+ image += ':{0}'.format(tag)
+
+ cmd.append(image)
+
+ if url:
+ cmd.append('--from={0}/{1}'.format(url, image))
+
+ cmd.append('-n{0}'.format(self.namespace))
+
+ cmd.append('--confirm')
+ return self.openshift_cmd(cmd)
+
+ # pylint: disable=too-many-arguments,too-many-branches
+ def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
+ '''Base command for oc '''
+ cmds = []
+ if oadm:
+ cmds = ['/usr/bin/oadm']
+ else:
+ cmds = ['/usr/bin/oc']
+
+ if self.all_namespaces:
+ cmds.extend(['--all-namespaces'])
+ elif self.namespace:
+ cmds.extend(['-n', self.namespace])
+
+ cmds.extend(cmd)
+
+ rval = {}
+ results = ''
+ err = None
+
+ if self.verbose:
+ print(' '.join(cmds))
+
+ proc = subprocess.Popen(cmds,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env={'KUBECONFIG': self.kubeconfig})
+
+ stdout, stderr = proc.communicate(input_data)
+ rval = {"returncode": proc.returncode,
+ "results": results,
+ "cmd": ' '.join(cmds)}
+
+ if proc.returncode == 0:
+ if output:
+ if output_type == 'json':
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as err:
+ if "No JSON object could be decoded" in err.args:
+ err = err.args
+ elif output_type == 'raw':
+ rval['results'] = stdout
+
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if err:
+ rval.update({"err": err,
+ "stderr": stderr,
+ "stdout": stdout,
+ "cmd": cmds})
+
+ else:
+ rval.update({"stderr": stderr,
+ "stdout": stdout,
+ "results": {}})
+
+ return rval
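+    # Illustration of the return shape: a successful JSON call should come
+    # back roughly as {'returncode': 0, 'results': <parsed output>,
+    # 'cmd': '/usr/bin/oc ...'}; on failure 'results' stays an empty dict
+    # and 'stdout'/'stderr' are included instead.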
+
+
+class Utils(object):
+ ''' utilities for openshiftcli modules '''
+ @staticmethod
+ def create_file(rname, data, ftype='yaml'):
+ ''' create a file in tmp with name and contents'''
+ path = os.path.join('/tmp', rname)
+ with open(path, 'w') as fds:
+ if ftype == 'yaml':
+ fds.write(yaml.dump(data, Dumper=yaml.RoundTripDumper))
+
+ elif ftype == 'json':
+ fds.write(json.dumps(data))
+ else:
+ fds.write(data)
+
+ # Register cleanup when module is done
+ atexit.register(Utils.cleanup, [path])
+ return path
+
+ @staticmethod
+ def create_files_from_contents(content, content_type=None):
+ '''Turn an array of dict: filename, content into a files array'''
+ if not isinstance(content, list):
+ content = [content]
+ files = []
+ for item in content:
+ path = Utils.create_file(item['path'], item['data'], ftype=content_type)
+ files.append({'name': os.path.basename(path), 'path': path})
+ return files
+
+ @staticmethod
+ def cleanup(files):
+ '''Clean up on exit '''
+ for sfile in files:
+ if os.path.exists(sfile):
+ if os.path.isdir(sfile):
+ shutil.rmtree(sfile)
+ elif os.path.isfile(sfile):
+ os.remove(sfile)
+
+ @staticmethod
+ def exists(results, _name):
+ ''' Check to see if the results include the name '''
+ if not results:
+ return False
+
+ if Utils.find_result(results, _name):
+ return True
+
+ return False
+
+ @staticmethod
+ def find_result(results, _name):
+ ''' Find the specified result by name'''
+ rval = None
+ for result in results:
+ if 'metadata' in result and result['metadata']['name'] == _name:
+ rval = result
+ break
+
+ return rval
+
+ @staticmethod
+ def get_resource_file(sfile, sfile_type='yaml'):
+ ''' return the service file '''
+ contents = None
+ with open(sfile) as sfd:
+ contents = sfd.read()
+
+ if sfile_type == 'yaml':
+ contents = yaml.load(contents, yaml.RoundTripLoader)
+ elif sfile_type == 'json':
+ contents = json.loads(contents)
+
+ return contents
+
+ @staticmethod
+ def filter_versions(stdout):
+ ''' filter the oc version output '''
+
+ version_dict = {}
+ version_search = ['oc', 'openshift', 'kubernetes']
+
+ for line in stdout.strip().split('\n'):
+ for term in version_search:
+ if not line:
+ continue
+ if line.startswith(term):
+ version_dict[term] = line.split()[-1]
+
+ # horrible hack to get openshift version in Openshift 3.2
+        # By default "oc version" in 3.2 does not return an "openshift" version
+ if "openshift" not in version_dict:
+ version_dict["openshift"] = version_dict["oc"]
+
+ return version_dict
+
+ @staticmethod
+ def add_custom_versions(versions):
+ ''' create custom versions strings '''
+
+ versions_dict = {}
+
+ for tech, version in versions.items():
+ # clean up "-" from version
+ if "-" in version:
+ version = version.split("-")[0]
+
+ if version.startswith('v'):
+ versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
+ # "v3.3.0.33" is what we have, we want "3.3"
+ versions_dict[tech + '_short'] = version[1:4]
+
+ return versions_dict
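+    # Illustration: given {'oc': 'v3.3.0.34-1'}, the slicing above should
+    # produce {'oc_numeric': '3.3.0.34', 'oc_short': '3.3'}.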
+
+ @staticmethod
+ def openshift_installed():
+ ''' check if openshift is installed '''
+ import yum
+
+ yum_base = yum.YumBase()
+ if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
+ return True
+
+ return False
+
+ # Disabling too-many-branches. This is a yaml dictionary comparison function
+ # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
+ @staticmethod
+ def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
+ ''' Given a user defined definition, compare it with the results given back by our query. '''
+
+ # Currently these values are autogenerated and we do not need to check them
+ skip = ['metadata', 'status']
+ if skip_keys:
+ skip.extend(skip_keys)
+
+ for key, value in result_def.items():
+ if key in skip:
+ continue
+
+ # Both are lists
+ if isinstance(value, list):
+ if key not in user_def:
+ if debug:
+ print('User data does not have key [%s]' % key)
+ print('User data: %s' % user_def)
+ return False
+
+ if not isinstance(user_def[key], list):
+ if debug:
+ print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
+ return False
+
+ if len(user_def[key]) != len(value):
+ if debug:
+ print("List lengths are not equal.")
+ print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
+ print("user_def: %s" % user_def[key])
+ print("value: %s" % value)
+ return False
+
+ for values in zip(user_def[key], value):
+ if isinstance(values[0], dict) and isinstance(values[1], dict):
+ if debug:
+ print('sending list - list')
+ print(type(values[0]))
+ print(type(values[1]))
+ result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
+ if not result:
+ print('list compare returned false')
+ return False
+
+ elif value != user_def[key]:
+ if debug:
+ print('value should be identical')
+ print(value)
+ print(user_def[key])
+ return False
+
+ # recurse on a dictionary
+ elif isinstance(value, dict):
+ if key not in user_def:
+ if debug:
+ print("user_def does not have key [%s]" % key)
+ return False
+ if not isinstance(user_def[key], dict):
+ if debug:
+ print("dict returned false: not instance of dict")
+ return False
+
+ # before passing ensure keys match
+ api_values = set(value.keys()) - set(skip)
+ user_values = set(user_def[key].keys()) - set(skip)
+ if api_values != user_values:
+ if debug:
+ print("keys are not equal in dict")
+ print(api_values)
+ print(user_values)
+ return False
+
+ result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
+ if not result:
+ if debug:
+ print("dict returned false")
+ print(result)
+ return False
+
+ # Verify each key, value pair is the same
+ else:
+ if key not in user_def or value != user_def[key]:
+ if debug:
+ print("value not equal; user_def does not have key")
+ print(key)
+ print(value)
+ if key in user_def:
+ print(user_def[key])
+ return False
+
+ if debug:
+ print('returning true')
+ return True
+
+
+class OpenShiftCLIConfig(object):
+ '''Generic Config'''
+ def __init__(self, rname, namespace, kubeconfig, options):
+ self.kubeconfig = kubeconfig
+ self.name = rname
+ self.namespace = namespace
+ self._options = options
+
+ @property
+ def config_options(self):
+ ''' return config options '''
+ return self._options
+
+ def to_option_list(self):
+ '''return all options as a string'''
+ return self.stringify()
+
+ def stringify(self):
+ ''' return the options hash as cli params in a string '''
+ rval = []
+ for key, data in self.config_options.items():
+ if data['include'] \
+ and (data['value'] or isinstance(data['value'], int)):
+ rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
+
+ return rval
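+    # Illustration (hypothetical option): {'service_account': {'value':
+    # 'router', 'include': True}} should stringify to
+    # ['--service-account=router']; falsy values are skipped unless they
+    # are integers, so 0 is still emitted.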
+
+
+class Edit(OpenShiftCLI):
+ ''' Class to wrap the oc command line tools
+ '''
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ kind,
+ namespace,
+ resource_name=None,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ separator='.',
+ verbose=False):
+ ''' Constructor for OpenshiftOC '''
+ super(Edit, self).__init__(namespace, kubeconfig)
+ self.namespace = namespace
+ self.kind = kind
+ self.name = resource_name
+ self.kubeconfig = kubeconfig
+ self.separator = separator
+ self.verbose = verbose
+
+ def get(self):
+ '''return a secret by name '''
+ return self._get(self.kind, self.name)
+
+ def update(self, file_name, content, force=False, content_type='yaml'):
+ '''run update '''
+ if file_name:
+ if content_type == 'yaml':
+ data = yaml.load(open(file_name))
+ elif content_type == 'json':
+ data = json.loads(open(file_name).read())
+
+ changes = []
+ yed = Yedit(filename=file_name, content=data, separator=self.separator)
+ for key, value in content.items():
+ changes.append(yed.put(key, value))
+
+ if any([not change[0] for change in changes]):
+ return {'returncode': 0, 'updated': False}
+
+ yed.write()
+
+ atexit.register(Utils.cleanup, [file_name])
+
+ return self._replace(file_name, force=force)
+
+ return self._replace_content(self.kind, self.name, content, force=force, sep=self.separator)
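+    # Illustration: update(None, {'spec.replicas': 2}) should fall through
+    # to _replace_content(), which fetches, edits, and `oc replace`s the
+    # live object, while passing a file_name edits that file first and then
+    # runs `oc replace -f <file>`.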
+
+ @staticmethod
+ def run_ansible(params, check_mode):
+ '''run the ansible idempotent code'''
+
+ ocedit = Edit(params['kind'],
+ params['namespace'],
+ params['name'],
+ kubeconfig=params['kubeconfig'],
+ separator=params['separator'],
+ verbose=params['debug'])
+
+ api_rval = ocedit.get()
+
+ ########
+ # Create
+ ########
+ if not Utils.exists(api_rval['results'], params['name']):
+ return {"failed": True, 'msg': api_rval}
+
+ ########
+ # Update
+ ########
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed edit'}
+
+ api_rval = ocedit.update(params['file_name'],
+ params['content'],
+ params['force'],
+ params['file_format'])
+
+ if api_rval['returncode'] != 0:
+ return {"failed": True, 'msg': api_rval}
+
+ if 'updated' in api_rval and not api_rval['updated']:
+ return {"changed": False, 'results': api_rval, 'state': 'present'}
+
+ # return the created object
+ api_rval = ocedit.get()
+
+ if api_rval['returncode'] != 0:
+ return {"failed": True, 'msg': api_rval}
+
+ return {"changed": True, 'results': api_rval, 'state': 'present'}
+
+
+def main():
+ '''
+ ansible oc module for editing objects
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ state=dict(default='present', type='str',
+ choices=['present']),
+ debug=dict(default=False, type='bool'),
+ namespace=dict(default='default', type='str'),
+ name=dict(default=None, required=True, type='str'),
+ kind=dict(required=True,
+ type='str',
+ choices=['dc', 'deploymentconfig',
+ 'rc', 'replicationcontroller',
+ 'svc', 'service',
+ 'scc', 'securitycontextconstraints',
+ 'ns', 'namespace', 'project', 'projects',
+ 'is', 'imagestream',
+ 'istag', 'imagestreamtag',
+ 'bc', 'buildconfig',
+ 'routes',
+ 'node',
+ 'secret',
+ 'pv', 'persistentvolume']),
+ file_name=dict(default=None, type='str'),
+ file_format=dict(default='yaml', type='str'),
+ content=dict(default=None, required=True, type='dict'),
+ force=dict(default=False, type='bool'),
+ separator=dict(default='.', type='str'),
+ ),
+ supports_check_mode=True,
+ )
+
+ rval = Edit.run_ansible(module.params, module.check_mode)
+ if 'failed' in rval:
+ module.fail_json(**rval)
+
+ module.exit_json(**rval)
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/lib_openshift/library/oc_obj.py b/roles/lib_openshift/library/oc_obj.py
new file mode 100644
index 000000000..5b501484b
--- /dev/null
+++ b/roles/lib_openshift/library/oc_obj.py
@@ -0,0 +1,1444 @@
+#!/usr/bin/env python
+# pylint: disable=missing-docstring
+# flake8: noqa: T001
+# ___ ___ _ _ ___ ___ _ _____ ___ ___
+# / __| __| \| | __| _ \ /_\_ _| __| \
+# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
+# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
+# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
+# | |) | (_) | | .` | (_) || | | _|| |) | | | |
+# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
+#
+# Copyright 2016 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+'''
+ OpenShiftCLI class that wraps the oc commands in a subprocess
+'''
+# pylint: disable=too-many-lines
+
+from __future__ import print_function
+import atexit
+import json
+import os
+import re
+import shutil
+import subprocess
+# pylint: disable=import-error
+import ruamel.yaml as yaml
+from ansible.module_utils.basic import AnsibleModule
+
+DOCUMENTATION = '''
+---
+module: oc_obj
+short_description: Generic interface to openshift objects
+description:
+ - Manage openshift objects programmatically.
+options:
+ state:
+ description:
+ - Currently present is only supported state.
+ required: true
+ default: present
+ choices: ["present", "absent", "list"]
+ aliases: []
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+ name:
+ description:
+ - Name of the object that is being queried.
+ required: false
+ default: None
+ aliases: []
+ namespace:
+ description:
+ - The namespace where the object lives.
+ required: false
+    default: default
+ aliases: []
+ all_namespace:
+ description:
+    - Whether to search all namespaces for the object.
+ required: false
+ default: false
+ aliases: []
+ kind:
+ description:
+ - The kind attribute of the object. e.g. dc, bc, svc, route
+ required: True
+ default: None
+ aliases: []
+ files:
+ description:
+ - A list of files provided for object
+ required: false
+ default: None
+ aliases: []
+ delete_after:
+ description:
+ - Whether or not to delete the files after processing them.
+ required: false
+ default: false
+ aliases: []
+ content:
+ description:
+ - Content of the object being managed.
+ required: false
+ default: None
+ aliases: []
+ force:
+ description:
+ - Whether or not to force the operation
+ required: false
+ default: None
+ aliases: []
+ selector:
+ description:
+ - Selector that gets added to the query.
+ required: false
+ default: None
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+oc_obj:
+ kind: dc
+ name: router
+ namespace: default
+register: router_output
+'''
+# noqa: E301,E302
+
+
+class YeditException(Exception):
+ ''' Exception class for Yedit '''
+ pass
+
+
+# pylint: disable=too-many-public-methods
+class Yedit(object):
+ ''' Class to modify yaml files '''
+ re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ com_sep = set(['.', '#', '|', ':'])
+
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ filename=None,
+ content=None,
+ content_type='yaml',
+ separator='.',
+ backup=False):
+ self.content = content
+ self._separator = separator
+ self.filename = filename
+ self.__yaml_dict = content
+ self.content_type = content_type
+ self.backup = backup
+ self.load(content_type=self.content_type)
+ if self.__yaml_dict is None:
+ self.__yaml_dict = {}
+
+    @property
+    def separator(self):
+        ''' getter method for separator '''
+        return self._separator
+
+    @separator.setter
+    def separator(self, value):
+        ''' setter method for separator '''
+        self._separator = value
+
+ @property
+ def yaml_dict(self):
+ ''' getter method for yaml_dict '''
+ return self.__yaml_dict
+
+ @yaml_dict.setter
+ def yaml_dict(self, value):
+ ''' setter method for yaml_dict '''
+ self.__yaml_dict = value
+
+ @staticmethod
+ def parse_key(key, sep='.'):
+ '''parse the key allowing the appropriate separator'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ return re.findall(Yedit.re_key % ''.join(common_separators), key)
+
+ @staticmethod
+ def valid_key(key, sep='.'):
+ '''validate the incoming key'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
+ return False
+
+ return True
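+    # Illustration (hypothetical path): Yedit.parse_key('a.b[0].c') should
+    # yield [('', 'a'), ('', 'b'), ('0', ''), ('', 'c')], and
+    # Yedit.valid_key('a.b[0].c') should return True.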
+
+ @staticmethod
+ def remove_entry(data, key, sep='.'):
+ ''' remove data at location key '''
+ if key == '' and isinstance(data, dict):
+ data.clear()
+ return True
+ elif key == '' and isinstance(data, list):
+ del data[:]
+ return True
+
+ if not (key and Yedit.valid_key(key, sep)) and \
+ isinstance(data, (list, dict)):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ # process last index for remove
+ # expected list entry
+ if key_indexes[-1][0]:
+ if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ del data[int(key_indexes[-1][0])]
+ return True
+
+ # expected dict entry
+ elif key_indexes[-1][1]:
+ if isinstance(data, dict):
+ del data[key_indexes[-1][1]]
+ return True
+
+ @staticmethod
+ def add_entry(data, key, item=None, sep='.'):
+        ''' Set an item in a dictionary with key notation a.b.c
+            d = {'a': {'b': 'c'}}
+            add_entry(d, 'a.b', 'x') sets d['a']['b'] = 'x'
+        '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key:
+ if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
+ data = data[dict_key]
+ continue
+
+ elif data and not isinstance(data, dict):
+ return None
+
+ data[dict_key] = {}
+ data = data[dict_key]
+
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ if key == '':
+ data = item
+
+ # process last index for add
+ # expected list entry
+ elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ data[int(key_indexes[-1][0])] = item
+
+ # expected dict entry
+ elif key_indexes[-1][1] and isinstance(data, dict):
+ data[key_indexes[-1][1]] = item
+
+ return data
+
+ @staticmethod
+ def get_entry(data, key, sep='.'):
+ ''' Get an item from a dictionary with key notation a.b.c
+            d = {'a': {'b': 'c'}}
+ key = a.b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ return data
+
+ def write(self):
+ ''' write to file '''
+ if not self.filename:
+ raise YeditException('Please specify a filename.')
+
+ if self.backup and self.file_exists():
+ shutil.copy(self.filename, self.filename + '.orig')
+
+ tmp_filename = self.filename + '.yedit'
+ with open(tmp_filename, 'w') as yfd:
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ self.yaml_dict.fa.set_block_style()
+
+ yfd.write(yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+
+ os.rename(tmp_filename, self.filename)
+
+ return (True, self.yaml_dict)
+
+ def read(self):
+ ''' read from file '''
+ # check if it exists
+ if self.filename is None or not self.file_exists():
+ return None
+
+ contents = None
+ with open(self.filename) as yfd:
+ contents = yfd.read()
+
+ return contents
+
+ def file_exists(self):
+ ''' return whether file exists '''
+ if os.path.exists(self.filename):
+ return True
+
+ return False
+
+ def load(self, content_type='yaml'):
+ ''' return yaml file '''
+ contents = self.read()
+
+ if not contents and not self.content:
+ return None
+
+ if self.content:
+ if isinstance(self.content, dict):
+ self.yaml_dict = self.content
+ return self.yaml_dict
+ elif isinstance(self.content, str):
+ contents = self.content
+
+ # check if it is yaml
+ try:
+ if content_type == 'yaml' and contents:
+ self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ self.yaml_dict.fa.set_block_style()
+ elif content_type == 'json' and contents:
+ self.yaml_dict = json.loads(contents)
+ except yaml.YAMLError as err:
+ # Error loading yaml or json
+ raise YeditException('Problem with loading yaml file. %s' % err)
+
+ return self.yaml_dict
+
+ def get(self, key):
+ ''' get a specified key'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
+ except KeyError:
+ entry = None
+
+ return entry
+
+ def pop(self, path, key_or_item):
+ ''' remove a key, value pair from a dict or an item for a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ if isinstance(entry, dict):
+ # pylint: disable=no-member,maybe-no-member
+ if key_or_item in entry:
+ entry.pop(key_or_item)
+ return (True, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # pylint: disable=no-member,maybe-no-member
+ ind = None
+ try:
+ ind = entry.index(key_or_item)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ entry.pop(ind)
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ def delete(self, path):
+ ''' remove path from a dict'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ return (True, self.yaml_dict)
+
+ def exists(self, path, value):
+ ''' check if value exists at path'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, list):
+ if value in entry:
+ return True
+ return False
+
+ elif isinstance(entry, dict):
+ if isinstance(value, dict):
+ rval = False
+ for key, val in value.items():
+ if entry[key] != val:
+ rval = False
+ break
+ else:
+ rval = True
+ return rval
+
+ return value in entry
+
+ return entry == value
+
+ def append(self, path, value):
+ '''append value to a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ self.put(path, [])
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ if not isinstance(entry, list):
+ return (False, self.yaml_dict)
+
+ # pylint: disable=no-member,maybe-no-member
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # pylint: disable=too-many-arguments
+ def update(self, path, value, index=None, curr_value=None):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, dict):
+ # pylint: disable=no-member,maybe-no-member
+ if not isinstance(value, dict):
+ raise YeditException('Cannot replace key, value entry in ' +
+ 'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
+
+ entry.update(value)
+ return (True, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # pylint: disable=no-member,maybe-no-member
+ ind = None
+ if curr_value:
+ try:
+ ind = entry.index(curr_value)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ elif index is not None:
+ ind = index
+
+ if ind is not None and entry[ind] != value:
+ entry[ind] = value
+ return (True, self.yaml_dict)
+
+ # see if it exists in the list
+ try:
+ ind = entry.index(value)
+ except ValueError:
+ # doesn't exist, append it
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # already exists, return
+ if ind is not None:
+ return (False, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ def put(self, path, value):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry == value:
+ return (False, self.yaml_dict)
+
+ # deepcopy didn't work
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ tmp_copy.fa.set_block_style()
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ self.yaml_dict = tmp_copy
+
+ return (True, self.yaml_dict)
+
+ def create(self, path, value):
+ ''' create a yaml file '''
+ if not self.file_exists():
+ # deepcopy didn't work
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), # noqa: E501
+ yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ tmp_copy.fa.set_block_style()
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if result:
+ self.yaml_dict = tmp_copy
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ @staticmethod
+ def get_curr_value(invalue, val_type):
+ '''return the current value'''
+ if invalue is None:
+ return None
+
+ curr_value = invalue
+ if val_type == 'yaml':
+ curr_value = yaml.load(invalue)
+ elif val_type == 'json':
+ curr_value = json.loads(invalue)
+
+ return curr_value
+
+ @staticmethod
+ def parse_value(inc_value, vtype=''):
+ '''determine value type passed'''
+ true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
+ 'on', 'On', 'ON', ]
+ false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
+ 'off', 'Off', 'OFF']
+
+ # It came in as a string but you didn't specify value_type as string
+ # we will convert to bool if it matches any of the above cases
+ if isinstance(inc_value, str) and 'bool' in vtype:
+ if inc_value not in true_bools and inc_value not in false_bools:
+ raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
+ % (inc_value, vtype))
+ elif isinstance(inc_value, bool) and 'str' in vtype:
+ inc_value = str(inc_value)
+
+ # If vtype is not str then go ahead and attempt to yaml load it.
+ if isinstance(inc_value, str) and 'str' not in vtype:
+ try:
+ inc_value = yaml.load(inc_value)
+ except Exception:
+ raise YeditException('Could not determine type of incoming ' +
+ 'value. value=[%s] vtype=[%s]'
+ % (type(inc_value), vtype))
+
+ return inc_value
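+    # Illustration: parse_value('True', vtype='bool') should yield the
+    # boolean True via yaml.load(), while vtype='str' leaves it a string.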
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(module):
+ '''perform the idempotent crud operations'''
+ yamlfile = Yedit(filename=module.params['src'],
+ backup=module.params['backup'],
+ separator=module.params['separator'])
+
+ if module.params['src']:
+ rval = yamlfile.load()
+
+ if yamlfile.yaml_dict is None and \
+ module.params['state'] != 'present':
+ return {'failed': True,
+ 'msg': 'Error opening file [%s]. Verify that the ' +
+                               'file exists, that it has correct' +
+ ' permissions, and is valid yaml.'}
+
+ if module.params['state'] == 'list':
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['key']:
+ rval = yamlfile.get(module.params['key']) or {}
+
+ return {'changed': False, 'result': rval, 'state': "list"}
+
+ elif module.params['state'] == 'absent':
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['update']:
+ rval = yamlfile.pop(module.params['key'],
+ module.params['value'])
+ else:
+ rval = yamlfile.delete(module.params['key'])
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
+
+ elif module.params['state'] == 'present':
+ # check if content is different than what is in the file
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+
+ # We had no edits to make and the contents are the same
+ if yamlfile.yaml_dict == content and \
+ module.params['value'] is None:
+ return {'changed': False,
+ 'result': yamlfile.yaml_dict,
+ 'state': "present"}
+
+ yamlfile.yaml_dict = content
+
+ # we were passed a value; parse it
+ if module.params['value']:
+ value = Yedit.parse_value(module.params['value'],
+ module.params['value_type'])
+ key = module.params['key']
+ if module.params['update']:
+ # pylint: disable=line-too-long
+ curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']), # noqa: E501
+ module.params['curr_value_format']) # noqa: E501
+
+ rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
+
+ elif module.params['append']:
+ rval = yamlfile.append(key, value)
+ else:
+ rval = yamlfile.put(key, value)
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0],
+ 'result': rval[1], 'state': "present"}
+
+ # no edits to make
+ if module.params['src']:
+ # pylint: disable=redefined-variable-type
+ rval = yamlfile.write()
+ return {'changed': rval[0],
+ 'result': rval[1],
+ 'state': "present"}
+
+        return {'failed': True, 'msg': 'Unknown state passed'}
+# pylint: disable=too-many-lines
+# noqa: E301,E302,E303,T001
+
+
+class OpenShiftCLIError(Exception):
+ '''Exception class for openshiftcli'''
+ pass
+
+
+# pylint: disable=too-few-public-methods
+class OpenShiftCLI(object):
+ ''' Class to wrap the command line tools '''
+ def __init__(self,
+ namespace,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False,
+ all_namespaces=False):
+ ''' Constructor for OpenshiftCLI '''
+ self.namespace = namespace
+ self.verbose = verbose
+ self.kubeconfig = kubeconfig
+ self.all_namespaces = all_namespaces
+
+ # Pylint allows only 5 arguments to be passed.
+ # pylint: disable=too-many-arguments
+ def _replace_content(self, resource, rname, content, force=False, sep='.'):
+ ''' replace the current object with the content '''
+ res = self._get(resource, rname)
+ if not res['results']:
+ return res
+
+ fname = '/tmp/%s' % rname
+ yed = Yedit(fname, res['results'][0], separator=sep)
+ changes = []
+ for key, value in content.items():
+ changes.append(yed.put(key, value))
+
+ if any([change[0] for change in changes]):
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._replace(fname, force)
+
+ return {'returncode': 0, 'updated': False}
+
+ def _replace(self, fname, force=False):
+ '''replace the current object with oc replace'''
+ cmd = ['replace', '-f', fname]
+ if force:
+ cmd.append('--force')
+ return self.openshift_cmd(cmd)
+
+ def _create_from_content(self, rname, content):
+ '''create a temporary file and then call oc create on it'''
+ fname = '/tmp/%s' % rname
+ yed = Yedit(fname, content=content)
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._create(fname)
+
+ def _create(self, fname):
+ '''call oc create on a filename'''
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _delete(self, resource, rname, selector=None):
+ '''call oc delete on a resource'''
+ cmd = ['delete', resource, rname]
+ if selector:
+ cmd.append('--selector=%s' % selector)
+
+ return self.openshift_cmd(cmd)
+
+ def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
+ '''process a template
+
+ template_name: the name of the template to process
+ create: whether to send to oc create after processing
+ params: the parameters for the template
+ template_data: the incoming template's data; instead of a file
+ '''
+ cmd = ['process']
+ if template_data:
+ cmd.extend(['-f', '-'])
+ else:
+ cmd.append(template_name)
+ if params:
+ param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ cmd.append('-v')
+ cmd.extend(param_str)
+
+ results = self.openshift_cmd(cmd, output=True, input_data=template_data)
+
+ if results['returncode'] != 0 or not create:
+ return results
+
+ fname = '/tmp/%s' % template_name
+ yed = Yedit(fname, results['results'])
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _get(self, resource, rname=None, selector=None):
+ '''return a resource by name '''
+ cmd = ['get', resource]
+ if selector:
+ cmd.append('--selector=%s' % selector)
+
+ cmd.extend(['-o', 'json'])
+
+ if rname:
+ cmd.append(rname)
+
+ rval = self.openshift_cmd(cmd, output=True)
+
+        # Ensure results are returned in an array
+ if 'items' in rval:
+ rval['results'] = rval['items']
+ elif not isinstance(rval['results'], list):
+ rval['results'] = [rval['results']]
+
+ return rval
+
+ def _schedulable(self, node=None, selector=None, schedulable=True):
+        ''' perform oadm manage-node schedulable '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ cmd.append('--schedulable=%s' % schedulable)
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
+
+ def _list_pods(self, node=None, selector=None, pod_selector=None):
+ ''' perform oadm list pods
+
+        node: the node on which to list pods
+ selector: the label selector filter if provided
+ pod_selector: the pod selector filter if provided
+ '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ if pod_selector:
+ cmd.append('--pod-selector=%s' % pod_selector)
+
+ cmd.extend(['--list-pods', '-o', 'json'])
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
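+
+    # Illustrative sketch (not from the upstream source): with
+    # node=['node1.example.com'] and pod_selector='app=db', the command built
+    # above is ['manage-node', 'node1.example.com', '--pod-selector=app=db',
+    # '--list-pods', '-o', 'json'], which openshift_cmd() prefixes with
+    # /usr/bin/oadm and the -n <namespace> flag before running it.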
+
+ # pylint: disable=too-many-arguments
+ def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
+ ''' perform oadm manage-node evacuate '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ if dry_run:
+ cmd.append('--dry-run')
+
+ if pod_selector:
+ cmd.append('--pod-selector=%s' % pod_selector)
+
+ if grace_period:
+ cmd.append('--grace-period=%s' % int(grace_period))
+
+ if force:
+ cmd.append('--force')
+
+ cmd.append('--evacuate')
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ def _version(self):
+ ''' return the openshift version'''
+ return self.openshift_cmd(['version'], output=True, output_type='raw')
+
+ def _import_image(self, url=None, name=None, tag=None):
+ ''' perform image import '''
+ cmd = ['import-image']
+
+ image = '{0}'.format(name)
+ if tag:
+ image += ':{0}'.format(tag)
+
+ cmd.append(image)
+
+ if url:
+ cmd.append('--from={0}/{1}'.format(url, image))
+
+ cmd.append('-n{0}'.format(self.namespace))
+
+ cmd.append('--confirm')
+ return self.openshift_cmd(cmd)
+
+ # pylint: disable=too-many-arguments,too-many-branches
+ def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
+ '''Base command for oc '''
+ cmds = []
+ if oadm:
+ cmds = ['/usr/bin/oadm']
+ else:
+ cmds = ['/usr/bin/oc']
+
+ if self.all_namespaces:
+ cmds.extend(['--all-namespaces'])
+ elif self.namespace:
+ cmds.extend(['-n', self.namespace])
+
+ cmds.extend(cmd)
+
+ rval = {}
+ results = ''
+ err = None
+
+ if self.verbose:
+ print(' '.join(cmds))
+
+ proc = subprocess.Popen(cmds,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env={'KUBECONFIG': self.kubeconfig})
+
+ stdout, stderr = proc.communicate(input_data)
+ rval = {"returncode": proc.returncode,
+ "results": results,
+ "cmd": ' '.join(cmds)}
+
+ if proc.returncode == 0:
+ if output:
+ if output_type == 'json':
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as err:
+ if "No JSON object could be decoded" in err.args:
+ err = err.args
+ elif output_type == 'raw':
+ rval['results'] = stdout
+
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if err:
+ rval.update({"err": err,
+ "stderr": stderr,
+ "stdout": stdout,
+ "cmd": cmds})
+
+ else:
+ rval.update({"stderr": stderr,
+ "stdout": stdout,
+ "results": {}})
+
+ return rval
+
+
+class Utils(object):
+ ''' utilities for openshiftcli modules '''
+ @staticmethod
+ def create_file(rname, data, ftype='yaml'):
+ ''' create a file in tmp with name and contents'''
+ path = os.path.join('/tmp', rname)
+ with open(path, 'w') as fds:
+ if ftype == 'yaml':
+ fds.write(yaml.dump(data, Dumper=yaml.RoundTripDumper))
+
+ elif ftype == 'json':
+ fds.write(json.dumps(data))
+ else:
+ fds.write(data)
+
+ # Register cleanup when module is done
+ atexit.register(Utils.cleanup, [path])
+ return path
+
+ @staticmethod
+ def create_files_from_contents(content, content_type=None):
+ '''Turn an array of dict: filename, content into a files array'''
+ if not isinstance(content, list):
+ content = [content]
+ files = []
+ for item in content:
+ path = Utils.create_file(item['path'], item['data'], ftype=content_type)
+ files.append({'name': os.path.basename(path), 'path': path})
+ return files
+
+ @staticmethod
+ def cleanup(files):
+ '''Clean up on exit '''
+ for sfile in files:
+ if os.path.exists(sfile):
+ if os.path.isdir(sfile):
+ shutil.rmtree(sfile)
+ elif os.path.isfile(sfile):
+ os.remove(sfile)
+
+ @staticmethod
+ def exists(results, _name):
+ ''' Check to see if the results include the name '''
+ if not results:
+ return False
+
+ if Utils.find_result(results, _name):
+ return True
+
+ return False
+
+ @staticmethod
+ def find_result(results, _name):
+ ''' Find the specified result by name'''
+ rval = None
+ for result in results:
+ if 'metadata' in result and result['metadata']['name'] == _name:
+ rval = result
+ break
+
+ return rval
+
+ @staticmethod
+ def get_resource_file(sfile, sfile_type='yaml'):
+ ''' return the service file '''
+ contents = None
+ with open(sfile) as sfd:
+ contents = sfd.read()
+
+ if sfile_type == 'yaml':
+ contents = yaml.load(contents, yaml.RoundTripLoader)
+ elif sfile_type == 'json':
+ contents = json.loads(contents)
+
+ return contents
+
+ @staticmethod
+ def filter_versions(stdout):
+ ''' filter the oc version output '''
+
+ version_dict = {}
+ version_search = ['oc', 'openshift', 'kubernetes']
+
+ for line in stdout.strip().split('\n'):
+ for term in version_search:
+ if not line:
+ continue
+ if line.startswith(term):
+ version_dict[term] = line.split()[-1]
+
+        # Hack to get the openshift version in OpenShift 3.2: by default,
+        # "oc version" in 3.2 does not return an "openshift" version.
+ if "openshift" not in version_dict:
+ version_dict["openshift"] = version_dict["oc"]
+
+ return version_dict
+
+ @staticmethod
+ def add_custom_versions(versions):
+ ''' create custom versions strings '''
+
+ versions_dict = {}
+
+ for tech, version in versions.items():
+ # clean up "-" from version
+ if "-" in version:
+ version = version.split("-")[0]
+
+ if version.startswith('v'):
+ versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
+ # "v3.3.0.33" is what we have, we want "3.3"
+ versions_dict[tech + '_short'] = version[1:4]
+
+ return versions_dict
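+
+    # Illustrative sketch (not from the upstream source) of the mapping above:
+    #   Utils.add_custom_versions({'oc': 'v3.3.0.33-1'})
+    #   # -> {'oc_numeric': '3.3.0.33', 'oc_short': '3.3'}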
+
+ @staticmethod
+ def openshift_installed():
+ ''' check if openshift is installed '''
+ import yum
+
+ yum_base = yum.YumBase()
+ if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
+ return True
+
+ return False
+
+ # Disabling too-many-branches. This is a yaml dictionary comparison function
+ # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
+ @staticmethod
+ def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
+ ''' Given a user defined definition, compare it with the results given back by our query. '''
+
+ # Currently these values are autogenerated and we do not need to check them
+ skip = ['metadata', 'status']
+ if skip_keys:
+ skip.extend(skip_keys)
+
+ for key, value in result_def.items():
+ if key in skip:
+ continue
+
+ # Both are lists
+ if isinstance(value, list):
+ if key not in user_def:
+ if debug:
+ print('User data does not have key [%s]' % key)
+ print('User data: %s' % user_def)
+ return False
+
+ if not isinstance(user_def[key], list):
+ if debug:
+ print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
+ return False
+
+ if len(user_def[key]) != len(value):
+ if debug:
+ print("List lengths are not equal.")
+ print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
+ print("user_def: %s" % user_def[key])
+ print("value: %s" % value)
+ return False
+
+ for values in zip(user_def[key], value):
+ if isinstance(values[0], dict) and isinstance(values[1], dict):
+ if debug:
+ print('sending list - list')
+ print(type(values[0]))
+ print(type(values[1]))
+ result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
+ if not result:
+ print('list compare returned false')
+ return False
+
+ elif value != user_def[key]:
+ if debug:
+ print('value should be identical')
+ print(value)
+ print(user_def[key])
+ return False
+
+ # recurse on a dictionary
+ elif isinstance(value, dict):
+ if key not in user_def:
+ if debug:
+ print("user_def does not have key [%s]" % key)
+ return False
+ if not isinstance(user_def[key], dict):
+ if debug:
+ print("dict returned false: not instance of dict")
+ return False
+
+ # before passing ensure keys match
+ api_values = set(value.keys()) - set(skip)
+ user_values = set(user_def[key].keys()) - set(skip)
+ if api_values != user_values:
+ if debug:
+ print("keys are not equal in dict")
+ print(api_values)
+ print(user_values)
+ return False
+
+ result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
+ if not result:
+ if debug:
+ print("dict returned false")
+ print(result)
+ return False
+
+ # Verify each key, value pair is the same
+ else:
+ if key not in user_def or value != user_def[key]:
+ if debug:
+ print("value not equal; user_def does not have key")
+ print(key)
+ print(value)
+ if key in user_def:
+ print(user_def[key])
+ return False
+
+ if debug:
+ print('returning true')
+ return True
+
+
+class OpenShiftCLIConfig(object):
+ '''Generic Config'''
+ def __init__(self, rname, namespace, kubeconfig, options):
+ self.kubeconfig = kubeconfig
+ self.name = rname
+ self.namespace = namespace
+ self._options = options
+
+ @property
+ def config_options(self):
+ ''' return config options '''
+ return self._options
+
+ def to_option_list(self):
+ '''return all options as a string'''
+ return self.stringify()
+
+ def stringify(self):
+ ''' return the options hash as cli params in a string '''
+ rval = []
+ for key, data in self.config_options.items():
+ if data['include'] \
+ and (data['value'] or isinstance(data['value'], int)):
+ rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
+
+ return rval
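+
+    # Hypothetical example (not from the upstream source) of stringify():
+    #   OpenShiftCLIConfig('r', 'ns', '/path/kubeconfig',
+    #                      {'service_name': {'value': 'frontend',
+    #                                        'include': True}}).stringify()
+    #   # -> ['--service-name=frontend']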
+
+
+# pylint: disable=too-many-instance-attributes
+class OCObject(OpenShiftCLI):
+ ''' Class to wrap the oc command line tools '''
+
+ # pylint allows 5. we need 6
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ kind,
+ namespace,
+ rname=None,
+ selector=None,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False,
+ all_namespaces=False):
+ ''' Constructor for OpenshiftOC '''
+ super(OCObject, self).__init__(namespace, kubeconfig,
+ all_namespaces=all_namespaces)
+ self.kind = kind
+ self.namespace = namespace
+ self.name = rname
+ self.selector = selector
+ self.kubeconfig = kubeconfig
+ self.verbose = verbose
+
+ def get(self):
+ '''return a kind by name '''
+ results = self._get(self.kind, rname=self.name, selector=self.selector)
+ if results['returncode'] != 0 and 'stderr' in results and \
+ '\"%s\" not found' % self.name in results['stderr']:
+ results['returncode'] = 0
+
+ return results
+
+ def delete(self):
+        '''delete the object'''
+ return self._delete(self.kind, self.name)
+
+ def create(self, files=None, content=None):
+ '''
+ Create a config
+
+        NOTE: This creates the first file OR the first content.
+ TODO: Handle all files and content passed in
+ '''
+ if files:
+ return self._create(files[0])
+
+ content['data'] = yaml.dump(content['data'])
+ content_file = Utils.create_files_from_contents(content)[0]
+
+ return self._create(content_file['path'])
+
+ # pylint: disable=too-many-function-args
+ def update(self, files=None, content=None, force=False):
+ '''update a current openshift object
+
+ This receives a list of file names or content
+ and takes the first and calls replace.
+
+ TODO: take an entire list
+ '''
+ if files:
+ return self._replace(files[0], force)
+
+ if content and 'data' in content:
+ content = content['data']
+
+ return self.update_content(content, force)
+
+ def update_content(self, content, force=False):
+ '''update an object through using the content param'''
+ return self._replace_content(self.kind, self.name, content, force=force)
+
+ def needs_update(self, files=None, content=None, content_type='yaml'):
+ ''' check to see if we need to update '''
+ objects = self.get()
+ if objects['returncode'] != 0:
+ return objects
+
+ # pylint: disable=no-member
+ data = None
+ if files:
+ data = Utils.get_resource_file(files[0], content_type)
+ elif content and 'data' in content:
+ data = content['data']
+ else:
+ data = content
+
+        # equal definitions need no update; return True only when they differ
+ return not Utils.check_def_equal(data, objects['results'][0], skip_keys=None, debug=False)
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(params, check_mode=False):
+ '''perform the ansible idempotent code'''
+
+ ocobj = OCObject(params['kind'],
+ params['namespace'],
+ params['name'],
+ params['selector'],
+ kubeconfig=params['kubeconfig'],
+ verbose=params['debug'],
+ all_namespaces=params['all_namespaces'])
+
+ state = params['state']
+
+ api_rval = ocobj.get()
+
+ #####
+ # Get
+ #####
+ if state == 'list':
+ return {'changed': False, 'results': api_rval, 'state': 'list'}
+
+ if not params['name']:
+ return {'failed': True, 'msg': 'Please specify a name when state is absent|present.'} # noqa: E501
+
+ ########
+ # Delete
+ ########
+ if state == 'absent':
+ if not Utils.exists(api_rval['results'], params['name']):
+ return {'changed': False, 'state': 'absent'}
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete'}
+
+ api_rval = ocobj.delete()
+
+ return {'changed': True, 'results': api_rval, 'state': 'absent'}
+
+ if state == 'present':
+ ########
+ # Create
+ ########
+ if not Utils.exists(api_rval['results'], params['name']):
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create'}
+
+ # Create it here
+ api_rval = ocobj.create(params['files'], params['content'])
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ # return the created object
+ api_rval = ocobj.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ # Remove files
+ if params['files'] and params['delete_after']:
+ Utils.cleanup(params['files'])
+
+ return {'changed': True, 'results': api_rval, 'state': "present"}
+
+ ########
+ # Update
+ ########
+ # if a file path is passed, use it.
+ update = ocobj.needs_update(params['files'], params['content'])
+ if not isinstance(update, bool):
+ return {'failed': True, 'msg': update}
+
+ # No changes
+ if not update:
+ if params['files'] and params['delete_after']:
+ Utils.cleanup(params['files'])
+
+ return {'changed': False, 'results': api_rval['results'][0], 'state': "present"}
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed an update.'}
+
+ api_rval = ocobj.update(params['files'],
+ params['content'],
+ params['force'])
+
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ # return the created object
+ api_rval = ocobj.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': "present"}
+
+# pylint: disable=too-many-branches
+def main():
+ '''
+    ansible oc module for managing openshift objects
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ state=dict(default='present', type='str',
+ choices=['present', 'absent', 'list']),
+ debug=dict(default=False, type='bool'),
+ namespace=dict(default='default', type='str'),
+            all_namespaces=dict(default=False, type='bool'),
+ name=dict(default=None, type='str'),
+ files=dict(default=None, type='list'),
+ kind=dict(required=True, type='str'),
+ delete_after=dict(default=False, type='bool'),
+ content=dict(default=None, type='dict'),
+ force=dict(default=False, type='bool'),
+ selector=dict(default=None, type='str'),
+ ),
+ mutually_exclusive=[["content", "files"]],
+
+ supports_check_mode=True,
+ )
+ rval = OCObject.run_ansible(module.params, module.check_mode)
+ if 'failed' in rval:
+ module.fail_json(**rval)
+
+ module.exit_json(**rval)
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/lib_openshift/library/oc_route.py b/roles/lib_openshift/library/oc_route.py
new file mode 100644
index 000000000..19c7462ea
--- /dev/null
+++ b/roles/lib_openshift/library/oc_route.py
@@ -0,0 +1,1614 @@
+#!/usr/bin/env python
+# pylint: disable=missing-docstring
+# flake8: noqa: T001
+# ___ ___ _ _ ___ ___ _ _____ ___ ___
+# / __| __| \| | __| _ \ /_\_ _| __| \
+# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
+# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
+# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
+# | |) | (_) | | .` | (_) || | | _|| |) | | | |
+# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
+#
+# Copyright 2016 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+'''
+ OpenShiftCLI class that wraps the oc commands in a subprocess
+'''
+# pylint: disable=too-many-lines
+
+from __future__ import print_function
+import atexit
+import json
+import os
+import re
+import shutil
+import subprocess
+# pylint: disable=import-error
+import ruamel.yaml as yaml
+from ansible.module_utils.basic import AnsibleModule
+
+DOCUMENTATION = '''
+---
+module: oc_route
+short_description: Create, modify, and idempotently manage openshift routes.
+description:
+ - Manage openshift route objects programmatically.
+options:
+ state:
+ description:
+ - State represents whether to create, modify, delete, or list
+ required: true
+ default: present
+ choices: ["present", "absent", "list"]
+ aliases: []
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+ name:
+ description:
+ - Name of the object that is being queried.
+ required: false
+ default: None
+ aliases: []
+ namespace:
+ description:
+ - The namespace where the object lives.
+ required: false
+    default: None
+ aliases: []
+ tls_termination:
+ description:
+ - The options for termination. e.g. reencrypt
+ required: false
+ default: None
+ aliases: []
+ dest_cacert_path:
+ description:
+ - The path to the dest_cacert
+ required: false
+ default: None
+ aliases: []
+ cacert_path:
+ description:
+ - The path to the cacert
+ required: false
+ default: None
+ aliases: []
+ cert_path:
+ description:
+ - The path to the cert
+ required: false
+ default: None
+ aliases: []
+ key_path:
+ description:
+ - The path to the key
+ required: false
+ default: None
+ aliases: []
+ dest_cacert_content:
+ description:
+ - The dest_cacert content
+ required: false
+ default: None
+ aliases: []
+ cacert_content:
+ description:
+ - The cacert content
+ required: false
+ default: None
+ aliases: []
+ cert_content:
+ description:
+ - The cert content
+ required: false
+ default: None
+ aliases: []
+ service_name:
+ description:
+ - The name of the service that this route points to.
+ required: false
+ default: None
+ aliases: []
+ host:
+ description:
+ - The host that the route will use. e.g. myapp.x.y.z
+ required: false
+ default: None
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+- name: Configure certificates for reencrypt route
+ oc_route:
+ name: myapproute
+ namespace: awesomeapp
+ cert_path: "/etc/origin/master/named_certificates/myapp_cert
+ key_path: "/etc/origin/master/named_certificates/myapp_key
+ cacert_path: "/etc/origin/master/named_certificates/myapp_cacert
+ dest_cacert_content: "{{ dest_cacert_content }}"
+ service_name: myapp_php
+ host: myapp.awesomeapp.openshift.com
+ tls_termination: reencrypt
+  run_once: true
+'''
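+
+# A hypothetical additional usage sketch (not part of the upstream EXAMPLES),
+# assuming the route above already exists and should be removed:
+#
+#   - name: Remove the reencrypt route
+#     oc_route:
+#       state: absent
+#       name: myapproute
+#       namespace: awesomeapp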
+# noqa: E301,E302
+
+
+class YeditException(Exception):
+ ''' Exception class for Yedit '''
+ pass
+
+
+# pylint: disable=too-many-public-methods
+class Yedit(object):
+ ''' Class to modify yaml files '''
+ re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ com_sep = set(['.', '#', '|', ':'])
+
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ filename=None,
+ content=None,
+ content_type='yaml',
+ separator='.',
+ backup=False):
+ self.content = content
+ self._separator = separator
+ self.filename = filename
+ self.__yaml_dict = content
+ self.content_type = content_type
+ self.backup = backup
+ self.load(content_type=self.content_type)
+ if self.__yaml_dict is None:
+ self.__yaml_dict = {}
+
+    @property
+    def separator(self):
+        ''' getter method for separator '''
+        return self._separator
+
+    @separator.setter
+    def separator(self, value):
+        ''' setter method for separator '''
+        self._separator = value
+
+ @property
+ def yaml_dict(self):
+ ''' getter method for yaml_dict '''
+ return self.__yaml_dict
+
+ @yaml_dict.setter
+ def yaml_dict(self, value):
+ ''' setter method for yaml_dict '''
+ self.__yaml_dict = value
+
+ @staticmethod
+ def parse_key(key, sep='.'):
+ '''parse the key allowing the appropriate separator'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ return re.findall(Yedit.re_key % ''.join(common_separators), key)
+
+ @staticmethod
+ def valid_key(key, sep='.'):
+ '''validate the incoming key'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
+ return False
+
+ return True
+
+ @staticmethod
+ def remove_entry(data, key, sep='.'):
+ ''' remove data at location key '''
+ if key == '' and isinstance(data, dict):
+ data.clear()
+ return True
+ elif key == '' and isinstance(data, list):
+ del data[:]
+ return True
+
+ if not (key and Yedit.valid_key(key, sep)) and \
+ isinstance(data, (list, dict)):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ # process last index for remove
+ # expected list entry
+ if key_indexes[-1][0]:
+ if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ del data[int(key_indexes[-1][0])]
+ return True
+
+ # expected dict entry
+ elif key_indexes[-1][1]:
+ if isinstance(data, dict):
+ del data[key_indexes[-1][1]]
+ return True
+
+ @staticmethod
+ def add_entry(data, key, item=None, sep='.'):
+        ''' Add an item to a dictionary with key notation a.b.c
+            d = {'a': {'b': 'c'}}
+            key = a#b
+            item = 'd'
+            result: d = {'a': {'b': 'd'}}
+        '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key:
+ if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
+ data = data[dict_key]
+ continue
+
+ elif data and not isinstance(data, dict):
+ return None
+
+ data[dict_key] = {}
+ data = data[dict_key]
+
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ if key == '':
+ data = item
+
+ # process last index for add
+ # expected list entry
+ elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ data[int(key_indexes[-1][0])] = item
+
+ # expected dict entry
+ elif key_indexes[-1][1] and isinstance(data, dict):
+ data[key_indexes[-1][1]] = item
+
+ return data
+
+ @staticmethod
+ def get_entry(data, key, sep='.'):
+ ''' Get an item from a dictionary with key notation a.b.c
+            d = {'a': {'b': 'c'}}
+ key = a.b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ return data
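+
+    # Illustrative sketch (not from the upstream docstring) of the dotted-key
+    # lookup implemented above:
+    #   Yedit.get_entry({'a': {'b': ['c']}}, 'a.b[0]')
+    #   # -> 'c'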
+
+ def write(self):
+ ''' write to file '''
+ if not self.filename:
+ raise YeditException('Please specify a filename.')
+
+ if self.backup and self.file_exists():
+ shutil.copy(self.filename, self.filename + '.orig')
+
+ tmp_filename = self.filename + '.yedit'
+ with open(tmp_filename, 'w') as yfd:
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ self.yaml_dict.fa.set_block_style()
+
+ yfd.write(yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+
+ os.rename(tmp_filename, self.filename)
+
+ return (True, self.yaml_dict)
+
+ def read(self):
+ ''' read from file '''
+ # check if it exists
+ if self.filename is None or not self.file_exists():
+ return None
+
+ contents = None
+ with open(self.filename) as yfd:
+ contents = yfd.read()
+
+ return contents
+
+ def file_exists(self):
+ ''' return whether file exists '''
+ if os.path.exists(self.filename):
+ return True
+
+ return False
+
+ def load(self, content_type='yaml'):
+ ''' return yaml file '''
+ contents = self.read()
+
+ if not contents and not self.content:
+ return None
+
+ if self.content:
+ if isinstance(self.content, dict):
+ self.yaml_dict = self.content
+ return self.yaml_dict
+ elif isinstance(self.content, str):
+ contents = self.content
+
+ # check if it is yaml
+ try:
+ if content_type == 'yaml' and contents:
+ self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ self.yaml_dict.fa.set_block_style()
+ elif content_type == 'json' and contents:
+ self.yaml_dict = json.loads(contents)
+ except yaml.YAMLError as err:
+ # Error loading yaml or json
+ raise YeditException('Problem with loading yaml file. %s' % err)
+
+ return self.yaml_dict
+
+ def get(self, key):
+ ''' get a specified key'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
+ except KeyError:
+ entry = None
+
+ return entry
+
+ def pop(self, path, key_or_item):
+ ''' remove a key, value pair from a dict or an item for a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ if isinstance(entry, dict):
+ # pylint: disable=no-member,maybe-no-member
+ if key_or_item in entry:
+ entry.pop(key_or_item)
+ return (True, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # pylint: disable=no-member,maybe-no-member
+ ind = None
+ try:
+ ind = entry.index(key_or_item)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ entry.pop(ind)
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ def delete(self, path):
+ ''' remove path from a dict'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ return (True, self.yaml_dict)
+
+ def exists(self, path, value):
+ ''' check if value exists at path'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, list):
+ if value in entry:
+ return True
+ return False
+
+ elif isinstance(entry, dict):
+ if isinstance(value, dict):
+ rval = False
+ for key, val in value.items():
+ if entry[key] != val:
+ rval = False
+ break
+ else:
+ rval = True
+ return rval
+
+ return value in entry
+
+ return entry == value
+
+ def append(self, path, value):
+ '''append value to a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ self.put(path, [])
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ if not isinstance(entry, list):
+ return (False, self.yaml_dict)
+
+ # pylint: disable=no-member,maybe-no-member
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # pylint: disable=too-many-arguments
+ def update(self, path, value, index=None, curr_value=None):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, dict):
+ # pylint: disable=no-member,maybe-no-member
+ if not isinstance(value, dict):
+ raise YeditException('Cannot replace key, value entry in ' +
+ 'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
+
+ entry.update(value)
+ return (True, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # pylint: disable=no-member,maybe-no-member
+ ind = None
+ if curr_value:
+ try:
+ ind = entry.index(curr_value)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ elif index is not None:
+ ind = index
+
+ if ind is not None and entry[ind] != value:
+ entry[ind] = value
+ return (True, self.yaml_dict)
+
+ # see if it exists in the list
+ try:
+ ind = entry.index(value)
+ except ValueError:
+ # doesn't exist, append it
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # already exists, return
+ if ind is not None:
+ return (False, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ def put(self, path, value):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry == value:
+ return (False, self.yaml_dict)
+
+ # deepcopy didn't work
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ tmp_copy.fa.set_block_style()
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ self.yaml_dict = tmp_copy
+
+ return (True, self.yaml_dict)
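+
+    # Hypothetical usage sketch (not from the upstream source) for put():
+    #   yed = Yedit(content={})
+    #   yed.put('a.b', 1)
+    #   # -> (True, {'a': {'b': 1}})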
+
+ def create(self, path, value):
+ ''' create a yaml file '''
+ if not self.file_exists():
+ # deepcopy didn't work
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), # noqa: E501
+ yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ tmp_copy.fa.set_block_style()
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if result:
+ self.yaml_dict = tmp_copy
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ @staticmethod
+ def get_curr_value(invalue, val_type):
+ '''return the current value'''
+ if invalue is None:
+ return None
+
+ curr_value = invalue
+ if val_type == 'yaml':
+ curr_value = yaml.load(invalue)
+ elif val_type == 'json':
+ curr_value = json.loads(invalue)
+
+ return curr_value
+
+ @staticmethod
+ def parse_value(inc_value, vtype=''):
+ '''determine value type passed'''
+ true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
+ 'on', 'On', 'ON', ]
+ false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
+ 'off', 'Off', 'OFF']
+
+ # It came in as a string but you didn't specify value_type as string
+ # we will convert to bool if it matches any of the above cases
+ if isinstance(inc_value, str) and 'bool' in vtype:
+ if inc_value not in true_bools and inc_value not in false_bools:
+ raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
+ % (inc_value, vtype))
+ elif isinstance(inc_value, bool) and 'str' in vtype:
+ inc_value = str(inc_value)
+
+ # If vtype is not str then go ahead and attempt to yaml load it.
+ if isinstance(inc_value, str) and 'str' not in vtype:
+ try:
+ inc_value = yaml.load(inc_value)
+ except Exception:
+ raise YeditException('Could not determine type of incoming ' +
+ 'value. value=[%s] vtype=[%s]'
+ % (type(inc_value), vtype))
+
+ return inc_value
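+
+    # Illustrative sketch (not from the upstream source) of the coercion above:
+    #   Yedit.parse_value('5')         # -> 5 (yaml-loaded into an int)
+    #   Yedit.parse_value('5', 'str')  # -> '5' (left as a string)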
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(module):
+ '''perform the idempotent crud operations'''
+ yamlfile = Yedit(filename=module.params['src'],
+ backup=module.params['backup'],
+ separator=module.params['separator'])
+
+ if module.params['src']:
+ rval = yamlfile.load()
+
+ if yamlfile.yaml_dict is None and \
+ module.params['state'] != 'present':
+            return {'failed': True,
+                    'msg': ('Error opening file [%s]. Verify that the '
+                            'file exists, that it has correct '
+                            'permissions, and is valid yaml.'
+                            % module.params['src'])}
+
+ if module.params['state'] == 'list':
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['key']:
+ rval = yamlfile.get(module.params['key']) or {}
+
+ return {'changed': False, 'result': rval, 'state': "list"}
+
+ elif module.params['state'] == 'absent':
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['update']:
+ rval = yamlfile.pop(module.params['key'],
+ module.params['value'])
+ else:
+ rval = yamlfile.delete(module.params['key'])
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
+
+ elif module.params['state'] == 'present':
+ # check if content is different than what is in the file
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+
+ # We had no edits to make and the contents are the same
+ if yamlfile.yaml_dict == content and \
+ module.params['value'] is None:
+ return {'changed': False,
+ 'result': yamlfile.yaml_dict,
+ 'state': "present"}
+
+ yamlfile.yaml_dict = content
+
+ # we were passed a value; parse it
+ if module.params['value']:
+ value = Yedit.parse_value(module.params['value'],
+ module.params['value_type'])
+ key = module.params['key']
+ if module.params['update']:
+ # pylint: disable=line-too-long
+ curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']), # noqa: E501
+ module.params['curr_value_format']) # noqa: E501
+
+ rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
+
+ elif module.params['append']:
+ rval = yamlfile.append(key, value)
+ else:
+ rval = yamlfile.put(key, value)
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0],
+ 'result': rval[1], 'state': "present"}
+
+ # no edits to make
+ if module.params['src']:
+ # pylint: disable=redefined-variable-type
+ rval = yamlfile.write()
+ return {'changed': rval[0],
+ 'result': rval[1],
+ 'state': "present"}
+
+        return {'failed': True, 'msg': 'Unknown state passed'}
+# pylint: disable=too-many-lines
+# noqa: E301,E302,E303,T001
+
+
+class OpenShiftCLIError(Exception):
+ '''Exception class for openshiftcli'''
+ pass
+
+
+# pylint: disable=too-few-public-methods
+class OpenShiftCLI(object):
+ ''' Class to wrap the command line tools '''
+ def __init__(self,
+ namespace,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False,
+ all_namespaces=False):
+ ''' Constructor for OpenshiftCLI '''
+ self.namespace = namespace
+ self.verbose = verbose
+ self.kubeconfig = kubeconfig
+ self.all_namespaces = all_namespaces
+
+ # Pylint allows only 5 arguments to be passed.
+ # pylint: disable=too-many-arguments
+ def _replace_content(self, resource, rname, content, force=False, sep='.'):
+ ''' replace the current object with the content '''
+ res = self._get(resource, rname)
+ if not res['results']:
+ return res
+
+ fname = '/tmp/%s' % rname
+ yed = Yedit(fname, res['results'][0], separator=sep)
+ changes = []
+ for key, value in content.items():
+ changes.append(yed.put(key, value))
+
+ if any([change[0] for change in changes]):
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._replace(fname, force)
+
+ return {'returncode': 0, 'updated': False}
+
+ def _replace(self, fname, force=False):
+ '''replace the current object with oc replace'''
+ cmd = ['replace', '-f', fname]
+ if force:
+ cmd.append('--force')
+ return self.openshift_cmd(cmd)
+
+ def _create_from_content(self, rname, content):
+ '''create a temporary file and then call oc create on it'''
+ fname = '/tmp/%s' % rname
+ yed = Yedit(fname, content=content)
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._create(fname)
+
+ def _create(self, fname):
+ '''call oc create on a filename'''
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _delete(self, resource, rname, selector=None):
+ '''call oc delete on a resource'''
+ cmd = ['delete', resource, rname]
+ if selector:
+ cmd.append('--selector=%s' % selector)
+
+ return self.openshift_cmd(cmd)
+
+ def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
+ '''process a template
+
+ template_name: the name of the template to process
+ create: whether to send to oc create after processing
+ params: the parameters for the template
+ template_data: the incoming template's data; instead of a file
+ '''
+ cmd = ['process']
+ if template_data:
+ cmd.extend(['-f', '-'])
+ else:
+ cmd.append(template_name)
+ if params:
+ param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ cmd.append('-v')
+ cmd.extend(param_str)
+
+ results = self.openshift_cmd(cmd, output=True, input_data=template_data)
+
+ if results['returncode'] != 0 or not create:
+ return results
+
+ fname = '/tmp/%s' % template_name
+ yed = Yedit(fname, results['results'])
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _get(self, resource, rname=None, selector=None):
+ '''return a resource by name '''
+ cmd = ['get', resource]
+ if selector:
+ cmd.append('--selector=%s' % selector)
+
+ cmd.extend(['-o', 'json'])
+
+ if rname:
+ cmd.append(rname)
+
+ rval = self.openshift_cmd(cmd, output=True)
+
+        # Ensure results are returned in an array
+ if 'items' in rval:
+ rval['results'] = rval['items']
+ elif not isinstance(rval['results'], list):
+ rval['results'] = [rval['results']]
+
+ return rval
+
+ def _schedulable(self, node=None, selector=None, schedulable=True):
+        ''' perform oadm manage-node schedulable '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ cmd.append('--schedulable=%s' % schedulable)
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
+
+ def _list_pods(self, node=None, selector=None, pod_selector=None):
+ ''' perform oadm list pods
+
+        node: the node on which to list pods
+ selector: the label selector filter if provided
+ pod_selector: the pod selector filter if provided
+ '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ if pod_selector:
+ cmd.append('--pod-selector=%s' % pod_selector)
+
+ cmd.extend(['--list-pods', '-o', 'json'])
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ # pylint: disable=too-many-arguments
+ def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
+ ''' perform oadm manage-node evacuate '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ if dry_run:
+ cmd.append('--dry-run')
+
+ if pod_selector:
+ cmd.append('--pod-selector=%s' % pod_selector)
+
+ if grace_period:
+ cmd.append('--grace-period=%s' % int(grace_period))
+
+ if force:
+ cmd.append('--force')
+
+ cmd.append('--evacuate')
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ def _version(self):
+ ''' return the openshift version'''
+ return self.openshift_cmd(['version'], output=True, output_type='raw')
+
+ def _import_image(self, url=None, name=None, tag=None):
+ ''' perform image import '''
+ cmd = ['import-image']
+
+ image = '{0}'.format(name)
+ if tag:
+ image += ':{0}'.format(tag)
+
+ cmd.append(image)
+
+ if url:
+ cmd.append('--from={0}/{1}'.format(url, image))
+
+ cmd.append('-n{0}'.format(self.namespace))
+
+ cmd.append('--confirm')
+ return self.openshift_cmd(cmd)
+
+ # pylint: disable=too-many-arguments,too-many-branches
+ def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
+ '''Base command for oc '''
+ cmds = []
+ if oadm:
+ cmds = ['/usr/bin/oadm']
+ else:
+ cmds = ['/usr/bin/oc']
+
+ if self.all_namespaces:
+ cmds.extend(['--all-namespaces'])
+ elif self.namespace:
+ cmds.extend(['-n', self.namespace])
+
+ cmds.extend(cmd)
+
+ rval = {}
+ results = ''
+ err = None
+
+ if self.verbose:
+ print(' '.join(cmds))
+
+ proc = subprocess.Popen(cmds,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env={'KUBECONFIG': self.kubeconfig})
+
+ stdout, stderr = proc.communicate(input_data)
+ rval = {"returncode": proc.returncode,
+ "results": results,
+ "cmd": ' '.join(cmds)}
+
+ if proc.returncode == 0:
+ if output:
+ if output_type == 'json':
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as err:
+ if "No JSON object could be decoded" in err.args:
+ err = err.args
+ elif output_type == 'raw':
+ rval['results'] = stdout
+
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if err:
+ rval.update({"err": err,
+ "stderr": stderr,
+ "stdout": stdout,
+ "cmd": cmds})
+
+ else:
+ rval.update({"stderr": stderr,
+ "stdout": stdout,
+ "results": {}})
+
+ return rval
+
+
+class Utils(object):
+ ''' utilities for openshiftcli modules '''
+ @staticmethod
+ def create_file(rname, data, ftype='yaml'):
+ ''' create a file in tmp with name and contents'''
+ path = os.path.join('/tmp', rname)
+ with open(path, 'w') as fds:
+ if ftype == 'yaml':
+ fds.write(yaml.dump(data, Dumper=yaml.RoundTripDumper))
+
+ elif ftype == 'json':
+ fds.write(json.dumps(data))
+ else:
+ fds.write(data)
+
+ # Register cleanup when module is done
+ atexit.register(Utils.cleanup, [path])
+ return path
+
+ @staticmethod
+ def create_files_from_contents(content, content_type=None):
+ '''Turn an array of dict: filename, content into a files array'''
+ if not isinstance(content, list):
+ content = [content]
+ files = []
+ for item in content:
+ path = Utils.create_file(item['path'], item['data'], ftype=content_type)
+ files.append({'name': os.path.basename(path), 'path': path})
+ return files
+
+ @staticmethod
+ def cleanup(files):
+ '''Clean up on exit '''
+ for sfile in files:
+ if os.path.exists(sfile):
+ if os.path.isdir(sfile):
+ shutil.rmtree(sfile)
+ elif os.path.isfile(sfile):
+ os.remove(sfile)
+
+ @staticmethod
+ def exists(results, _name):
+ ''' Check to see if the results include the name '''
+ if not results:
+ return False
+
+ if Utils.find_result(results, _name):
+ return True
+
+ return False
+
+ @staticmethod
+ def find_result(results, _name):
+ ''' Find the specified result by name'''
+ rval = None
+ for result in results:
+ if 'metadata' in result and result['metadata']['name'] == _name:
+ rval = result
+ break
+
+ return rval
+
+ @staticmethod
+ def get_resource_file(sfile, sfile_type='yaml'):
+ ''' return the service file '''
+ contents = None
+ with open(sfile) as sfd:
+ contents = sfd.read()
+
+ if sfile_type == 'yaml':
+ contents = yaml.load(contents, yaml.RoundTripLoader)
+ elif sfile_type == 'json':
+ contents = json.loads(contents)
+
+ return contents
+
+ @staticmethod
+ def filter_versions(stdout):
+ ''' filter the oc version output '''
+
+ version_dict = {}
+ version_search = ['oc', 'openshift', 'kubernetes']
+
+ for line in stdout.strip().split('\n'):
+ for term in version_search:
+ if not line:
+ continue
+ if line.startswith(term):
+ version_dict[term] = line.split()[-1]
+
+        # Hack to get the openshift version in OpenShift 3.2: by default,
+        # "oc version" in 3.2 does not return an "openshift" version.
+ if "openshift" not in version_dict:
+ version_dict["openshift"] = version_dict["oc"]
+
+ return version_dict
+
+ @staticmethod
+ def add_custom_versions(versions):
+ ''' create custom versions strings '''
+
+ versions_dict = {}
+
+ for tech, version in versions.items():
+ # clean up "-" from version
+ if "-" in version:
+ version = version.split("-")[0]
+
+ if version.startswith('v'):
+ versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
+ # "v3.3.0.33" is what we have, we want "3.3"
+ versions_dict[tech + '_short'] = version[1:4]
+
+ return versions_dict
+
+ @staticmethod
+ def openshift_installed():
+ ''' check if openshift is installed '''
+ import yum
+
+ yum_base = yum.YumBase()
+ if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
+ return True
+
+ return False
+
+ # Disabling too-many-branches. This is a yaml dictionary comparison function
+ # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
+ @staticmethod
+ def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
+ ''' Given a user defined definition, compare it with the results given back by our query. '''
+
+ # Currently these values are autogenerated and we do not need to check them
+ skip = ['metadata', 'status']
+ if skip_keys:
+ skip.extend(skip_keys)
+
+ for key, value in result_def.items():
+ if key in skip:
+ continue
+
+ # Both are lists
+ if isinstance(value, list):
+ if key not in user_def:
+ if debug:
+ print('User data does not have key [%s]' % key)
+ print('User data: %s' % user_def)
+ return False
+
+ if not isinstance(user_def[key], list):
+ if debug:
+ print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
+ return False
+
+ if len(user_def[key]) != len(value):
+ if debug:
+ print("List lengths are not equal.")
+ print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
+ print("user_def: %s" % user_def[key])
+ print("value: %s" % value)
+ return False
+
+ for values in zip(user_def[key], value):
+ if isinstance(values[0], dict) and isinstance(values[1], dict):
+ if debug:
+ print('sending list - list')
+ print(type(values[0]))
+ print(type(values[1]))
+ result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
+ if not result:
+ print('list compare returned false')
+ return False
+
+ elif value != user_def[key]:
+ if debug:
+ print('value should be identical')
+ print(value)
+ print(user_def[key])
+ return False
+
+ # recurse on a dictionary
+ elif isinstance(value, dict):
+ if key not in user_def:
+ if debug:
+ print("user_def does not have key [%s]" % key)
+ return False
+ if not isinstance(user_def[key], dict):
+ if debug:
+ print("dict returned false: not instance of dict")
+ return False
+
+ # before passing ensure keys match
+ api_values = set(value.keys()) - set(skip)
+ user_values = set(user_def[key].keys()) - set(skip)
+ if api_values != user_values:
+ if debug:
+ print("keys are not equal in dict")
+ print(api_values)
+ print(user_values)
+ return False
+
+ result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
+ if not result:
+ if debug:
+ print("dict returned false")
+ print(result)
+ return False
+
+ # Verify each key, value pair is the same
+ else:
+ if key not in user_def or value != user_def[key]:
+ if debug:
+ print("value not equal; user_def does not have key")
+ print(key)
+ print(value)
+ if key in user_def:
+ print(user_def[key])
+ return False
+
+ if debug:
+ print('returning true')
+ return True
+
+
+class OpenShiftCLIConfig(object):
+ '''Generic Config'''
+ def __init__(self, rname, namespace, kubeconfig, options):
+ self.kubeconfig = kubeconfig
+ self.name = rname
+ self.namespace = namespace
+ self._options = options
+
+ @property
+ def config_options(self):
+ ''' return config options '''
+ return self._options
+
+ def to_option_list(self):
+ '''return all options as a string'''
+ return self.stringify()
+
+ def stringify(self):
+ ''' return the options hash as cli params in a string '''
+ rval = []
+ for key, data in self.config_options.items():
+ if data['include'] \
+ and (data['value'] or isinstance(data['value'], int)):
+ rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
+
+ return rval
+
+# noqa: E302,E301
+
+
+# pylint: disable=too-many-instance-attributes
+class RouteConfig(object):
+ ''' Handle route options '''
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ sname,
+ namespace,
+ kubeconfig,
+ destcacert=None,
+ cacert=None,
+ cert=None,
+ key=None,
+ host=None,
+ tls_termination=None,
+ service_name=None,
+ wildcard_policy=None,
+ weight=None):
+ ''' constructor for handling route options '''
+ self.kubeconfig = kubeconfig
+ self.name = sname
+ self.namespace = namespace
+ self.host = host
+ self.tls_termination = tls_termination
+ self.destcacert = destcacert
+ self.cacert = cacert
+ self.cert = cert
+ self.key = key
+ self.service_name = service_name
+ self.data = {}
+ self.wildcard_policy = wildcard_policy
+ if wildcard_policy is None:
+ self.wildcard_policy = 'None'
+ self.weight = weight
+ if weight is None:
+ self.weight = 100
+
+ self.create_dict()
+
+ def create_dict(self):
+ ''' return a service as a dict '''
+ self.data['apiVersion'] = 'v1'
+ self.data['kind'] = 'Route'
+ self.data['metadata'] = {}
+ self.data['metadata']['name'] = self.name
+ self.data['metadata']['namespace'] = self.namespace
+ self.data['spec'] = {}
+
+ self.data['spec']['host'] = self.host
+
+ if self.tls_termination:
+ self.data['spec']['tls'] = {}
+
+ if self.tls_termination == 'reencrypt':
+ self.data['spec']['tls']['destinationCACertificate'] = self.destcacert
+ self.data['spec']['tls']['key'] = self.key
+ self.data['spec']['tls']['caCertificate'] = self.cacert
+ self.data['spec']['tls']['certificate'] = self.cert
+ self.data['spec']['tls']['termination'] = self.tls_termination
+
+ self.data['spec']['to'] = {'kind': 'Service',
+ 'name': self.service_name,
+ 'weight': self.weight}
+
+ self.data['spec']['wildcardPolicy'] = self.wildcard_policy
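+
+        # Illustrative sketch (not from the upstream source) of the structure
+        # built above, e.g. for an edge-terminated route named 'myroute':
+        #   {'apiVersion': 'v1', 'kind': 'Route',
+        #    'metadata': {'name': 'myroute', 'namespace': 'default'},
+        #    'spec': {'host': 'myroute.example.com',
+        #             'tls': {'termination': 'edge'},
+        #             'to': {'kind': 'Service', 'name': 'frontend', 'weight': 100},
+        #             'wildcardPolicy': 'None'}}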
+
+# pylint: disable=too-many-instance-attributes,too-many-public-methods
+class Route(Yedit):
+ ''' Class to wrap the oc command line tools '''
+ wildcard_policy = "spec.wildcardPolicy"
+ host_path = "spec.host"
+ service_path = "spec.to.name"
+ weight_path = "spec.to.weight"
+ cert_path = "spec.tls.certificate"
+ cacert_path = "spec.tls.caCertificate"
+ destcacert_path = "spec.tls.destinationCACertificate"
+ termination_path = "spec.tls.termination"
+ key_path = "spec.tls.key"
+ kind = 'route'
+
+ def __init__(self, content):
+ '''Route constructor'''
+ super(Route, self).__init__(content=content)
+
+ def get_destcacert(self):
+ ''' return cert '''
+ return self.get(Route.destcacert_path)
+
+ def get_cert(self):
+ ''' return cert '''
+ return self.get(Route.cert_path)
+
+ def get_key(self):
+ ''' return key '''
+ return self.get(Route.key_path)
+
+ def get_cacert(self):
+ ''' return cacert '''
+ return self.get(Route.cacert_path)
+
+ def get_service(self):
+ ''' return service name '''
+ return self.get(Route.service_path)
+
+ def get_weight(self):
+ ''' return service weight '''
+ return self.get(Route.weight_path)
+
+ def get_termination(self):
+ ''' return tls termination'''
+ return self.get(Route.termination_path)
+
+ def get_host(self):
+ ''' return host '''
+ return self.get(Route.host_path)
+
+ def get_wildcard_policy(self):
+ ''' return wildcardPolicy '''
+ return self.get(Route.wildcard_policy)
+
+
+# pylint: disable=too-many-instance-attributes
+class OCRoute(OpenShiftCLI):
+ ''' Class to wrap the oc command line tools '''
+ kind = 'route'
+
+ def __init__(self,
+ config,
+ verbose=False):
+        ''' Constructor for OCRoute '''
+ super(OCRoute, self).__init__(config.namespace, config.kubeconfig)
+ self.config = config
+ self.namespace = config.namespace
+ self._route = None
+
+ @property
+ def route(self):
+ ''' property function for route'''
+ if not self._route:
+ self.get()
+ return self._route
+
+ @route.setter
+ def route(self, data):
+ ''' setter function for route '''
+ self._route = data
+
+ def exists(self):
+ ''' return whether a route exists '''
+ if self.route:
+ return True
+
+ return False
+
+ def get(self):
+ '''return route information '''
+ result = self._get(self.kind, self.config.name)
+ if result['returncode'] == 0:
+ self.route = Route(content=result['results'][0])
+ elif 'routes \"%s\" not found' % self.config.name in result['stderr']:
+ result['returncode'] = 0
+ result['results'] = [{}]
+
+ return result
+
+ def delete(self):
+ '''delete the object'''
+ return self._delete(self.kind, self.config.name)
+
+ def create(self):
+ '''create the object'''
+ return self._create_from_content(self.config.name, self.config.data)
+
+ def update(self):
+ '''update the object'''
+ # need to update the tls information and the service name
+ return self._replace_content(self.kind, self.config.name, self.config.data)
+
+ def needs_update(self):
+ ''' verify an update is needed '''
+ skip = []
+ return not Utils.check_def_equal(self.config.data, self.route.yaml_dict, skip_keys=skip, debug=True)
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(params, files, check_mode=False):
+        ''' run the idempotent ansible code
+
+ params comes from the ansible portion for this module
+ files: a dictionary for the certificates
+ {'cert': {'path': '',
+ 'content': '',
+ 'value': ''
+ }
+ }
+        check_mode: whether the module is running in check mode (module.check_mode)
+ '''
+
+ rconfig = RouteConfig(params['name'],
+ params['namespace'],
+ params['kubeconfig'],
+ files['destcacert']['value'],
+ files['cacert']['value'],
+ files['cert']['value'],
+ files['key']['value'],
+ params['host'],
+ params['tls_termination'],
+ params['service_name'],
+ params['wildcard_policy'],
+ params['weight'])
+
+ oc_route = OCRoute(rconfig, verbose=params['debug'])
+
+ state = params['state']
+
+ api_rval = oc_route.get()
+
+ #####
+ # Get
+ #####
+ if state == 'list':
+ return {'changed': False,
+ 'results': api_rval['results'],
+ 'state': 'list'}
+
+ ########
+ # Delete
+ ########
+ if state == 'absent':
+ if oc_route.exists():
+
+ if check_mode:
+ return {'changed': False, 'msg': 'CHECK_MODE: Would have performed a delete.'} # noqa: E501
+
+ api_rval = oc_route.delete()
+
+ return {'changed': True, 'results': api_rval, 'state': "absent"} # noqa: E501
+ return {'changed': False, 'state': 'absent'}
+
+ if state == 'present':
+ ########
+ # Create
+ ########
+ if not oc_route.exists():
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create.'} # noqa: E501
+
+ # Create it here
+ api_rval = oc_route.create()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval, 'state': "present"} # noqa: E501
+
+ # return the created object
+ api_rval = oc_route.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval, 'state': "present"} # noqa: E501
+
+ return {'changed': True, 'results': api_rval, 'state': "present"} # noqa: E501
+
+ ########
+ # Update
+ ########
+ if oc_route.needs_update():
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed an update.'} # noqa: E501
+
+ api_rval = oc_route.update()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval, 'state': "present"} # noqa: E501
+
+ # return the created object
+ api_rval = oc_route.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval, 'state': "present"} # noqa: E501
+
+ return {'changed': True, 'results': api_rval, 'state': "present"} # noqa: E501
+
+ return {'changed': False, 'results': api_rval, 'state': "present"}
+
+ # catch all
+ return {'failed': True, 'msg': "Unknown State passed"}
+
+
+def get_cert_data(path, content):
+ '''get the data for a particular value'''
+ if not path and not content:
+ return None
+
+ rval = None
+ if path and os.path.exists(path) and os.access(path, os.R_OK):
+ rval = open(path).read()
+ elif content:
+ rval = content
+
+ return rval
+
+
+# pylint: disable=too-many-branches
+def main():
+ '''
+ ansible oc module for route
+ '''
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ state=dict(default='present', type='str',
+ choices=['present', 'absent', 'list']),
+ debug=dict(default=False, type='bool'),
+ name=dict(default=None, required=True, type='str'),
+ namespace=dict(default=None, required=True, type='str'),
+ tls_termination=dict(default=None, type='str'),
+ dest_cacert_path=dict(default=None, type='str'),
+ cacert_path=dict(default=None, type='str'),
+ cert_path=dict(default=None, type='str'),
+ key_path=dict(default=None, type='str'),
+ dest_cacert_content=dict(default=None, type='str'),
+ cacert_content=dict(default=None, type='str'),
+ cert_content=dict(default=None, type='str'),
+ key_content=dict(default=None, type='str'),
+ service_name=dict(default=None, type='str'),
+ host=dict(default=None, type='str'),
+ wildcard_policy=dict(default=None, type='str'),
+ weight=dict(default=None, type='int'),
+ ),
+ mutually_exclusive=[('dest_cacert_path', 'dest_cacert_content'),
+ ('cacert_path', 'cacert_content'),
+ ('cert_path', 'cert_content'),
+ ('key_path', 'key_content'), ],
+ supports_check_mode=True,
+ )
+ files = {'destcacert': {'path': module.params['dest_cacert_path'],
+ 'content': module.params['dest_cacert_content'],
+ 'value': None, },
+ 'cacert': {'path': module.params['cacert_path'],
+ 'content': module.params['cacert_content'],
+ 'value': None, },
+ 'cert': {'path': module.params['cert_path'],
+ 'content': module.params['cert_content'],
+ 'value': None, },
+ 'key': {'path': module.params['key_path'],
+ 'content': module.params['key_content'],
+ 'value': None, }, }
+
+ if module.params['tls_termination']:
+ for key, option in files.items():
+ if key == 'destcacert' and module.params['tls_termination'] != 'reencrypt':
+ continue
+
+ option['value'] = get_cert_data(option['path'], option['content'])
+
+ if not option['value']:
+ module.fail_json(msg='Verify that you pass a value for %s' % key)
+
+ results = OCRoute.run_ansible(module.params, files, module.check_mode)
+
+ if 'failed' in results:
+ module.fail_json(**results)
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/lib_openshift/library/oc_version.py b/roles/lib_openshift/library/oc_version.py
new file mode 100644
index 000000000..197a0a947
--- /dev/null
+++ b/roles/lib_openshift/library/oc_version.py
@@ -0,0 +1,1232 @@
+#!/usr/bin/env python
+# pylint: disable=missing-docstring
+# flake8: noqa: T001
+# ___ ___ _ _ ___ ___ _ _____ ___ ___
+# / __| __| \| | __| _ \ /_\_ _| __| \
+# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
+# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
+# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
+# | |) | (_) | | .` | (_) || | | _|| |) | | | |
+# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
+#
+# Copyright 2016 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+'''
+ OpenShiftCLI class that wraps the oc commands in a subprocess
+'''
+# pylint: disable=too-many-lines
+
+from __future__ import print_function
+import atexit
+import json
+import os
+import re
+import shutil
+import subprocess
+# pylint: disable=import-error
+import ruamel.yaml as yaml
+from ansible.module_utils.basic import AnsibleModule
+
+DOCUMENTATION = '''
+---
+module: oc_version
+short_description: Return the current openshift version
+description:
+ - Return the installed OpenShift version, as reported by `oc version`.
+options:
+ state:
+ description:
+ - Currently 'list' is the only supported state.
+ required: true
+ default: list
+ choices: ["list"]
+ aliases: []
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+- name: get oc version
+ oc_version:
+ register: oc_version
+'''
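+# Illustrative shape of the registered result (values are hypothetical):
+# oc_version.results ~= {'returncode': 0,
+# 'oc': 'v3.4.0.39', 'oc_numeric': '3.4.0.39', 'oc_short': '3.4',
+# 'openshift': 'v3.4.0.39', 'kubernetes': 'v1.4.0+776c994', ...}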
+# noqa: E301,E302
+
+
+class YeditException(Exception):
+ ''' Exception class for Yedit '''
+ pass
+
+
+# pylint: disable=too-many-public-methods
+class Yedit(object):
+ ''' Class to modify yaml files '''
+ re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ com_sep = set(['.', '#', '|', ':'])
+
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ filename=None,
+ content=None,
+ content_type='yaml',
+ separator='.',
+ backup=False):
+ self.content = content
+ self._separator = separator
+ self.filename = filename
+ self.__yaml_dict = content
+ self.content_type = content_type
+ self.backup = backup
+ self.load(content_type=self.content_type)
+ if self.__yaml_dict is None:
+ self.__yaml_dict = {}
+
+ @property
+ def separator(self):
+ ''' getter method for separator '''
+ return self._separator
+
+ @separator.setter
+ def separator(self, value):
+ ''' setter method for separator '''
+ self._separator = value
+
+ @property
+ def yaml_dict(self):
+ ''' getter method for yaml_dict '''
+ return self.__yaml_dict
+
+ @yaml_dict.setter
+ def yaml_dict(self, value):
+ ''' setter method for yaml_dict '''
+ self.__yaml_dict = value
+
+ @staticmethod
+ def parse_key(key, sep='.'):
+ '''parse the key allowing the appropriate separator'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ return re.findall(Yedit.re_key % ''.join(common_separators), key)
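+ # Illustrative behaviour with the default '.' separator (hypothetical key):
+ # Yedit.parse_key('a.b[0].c')
+ # -> [('', 'a'), ('', 'b'), ('0', ''), ('', 'c')]
+ # Each tuple is (list_index, dict_key); exactly one of the two is set.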
+
+ @staticmethod
+ def valid_key(key, sep='.'):
+ '''validate the incoming key'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
+ return False
+
+ return True
+
+ @staticmethod
+ def remove_entry(data, key, sep='.'):
+ ''' remove data at location key '''
+ if key == '' and isinstance(data, dict):
+ data.clear()
+ return True
+ elif key == '' and isinstance(data, list):
+ del data[:]
+ return True
+
+ if not (key and Yedit.valid_key(key, sep)) and \
+ isinstance(data, (list, dict)):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ # process last index for remove
+ # expected list entry
+ if key_indexes[-1][0]:
+ if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ del data[int(key_indexes[-1][0])]
+ return True
+
+ # expected dict entry
+ elif key_indexes[-1][1]:
+ if isinstance(data, dict):
+ del data[key_indexes[-1][1]]
+ return True
+
+ @staticmethod
+ def add_entry(data, key, item=None, sep='.'):
+ ''' Add (or overwrite) an item in a dictionary using key notation a.b.c
+ d = {'a': {'b': 'c'}}
+ key = a.b
+ item = 'd'
+ result: d = {'a': {'b': 'd'}}
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key:
+ if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
+ data = data[dict_key]
+ continue
+
+ elif data and not isinstance(data, dict):
+ return None
+
+ data[dict_key] = {}
+ data = data[dict_key]
+
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ if key == '':
+ data = item
+
+ # process last index for add
+ # expected list entry
+ elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ data[int(key_indexes[-1][0])] = item
+
+ # expected dict entry
+ elif key_indexes[-1][1] and isinstance(data, dict):
+ data[key_indexes[-1][1]] = item
+
+ return data
+
+ @staticmethod
+ def get_entry(data, key, sep='.'):
+ ''' Get an item from a dictionary with key notation a.b.c
+ d = {'a': {'b': 'c'}}
+ key = a.b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ return data
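+ # Illustrative lookup on hypothetical data:
+ # Yedit.get_entry({'a': {'b': ['x', 'y']}}, 'a.b[1]') -> 'y'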
+
+ def write(self):
+ ''' write to file '''
+ if not self.filename:
+ raise YeditException('Please specify a filename.')
+
+ if self.backup and self.file_exists():
+ shutil.copy(self.filename, self.filename + '.orig')
+
+ tmp_filename = self.filename + '.yedit'
+ with open(tmp_filename, 'w') as yfd:
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ self.yaml_dict.fa.set_block_style()
+
+ yfd.write(yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+
+ os.rename(tmp_filename, self.filename)
+
+ return (True, self.yaml_dict)
+
+ def read(self):
+ ''' read from file '''
+ # check if it exists
+ if self.filename is None or not self.file_exists():
+ return None
+
+ contents = None
+ with open(self.filename) as yfd:
+ contents = yfd.read()
+
+ return contents
+
+ def file_exists(self):
+ ''' return whether file exists '''
+ if os.path.exists(self.filename):
+ return True
+
+ return False
+
+ def load(self, content_type='yaml'):
+ ''' return yaml file '''
+ contents = self.read()
+
+ if not contents and not self.content:
+ return None
+
+ if self.content:
+ if isinstance(self.content, dict):
+ self.yaml_dict = self.content
+ return self.yaml_dict
+ elif isinstance(self.content, str):
+ contents = self.content
+
+ # check if it is yaml
+ try:
+ if content_type == 'yaml' and contents:
+ self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ self.yaml_dict.fa.set_block_style()
+ elif content_type == 'json' and contents:
+ self.yaml_dict = json.loads(contents)
+ except yaml.YAMLError as err:
+ # Error loading yaml or json
+ raise YeditException('Problem with loading yaml file. %s' % err)
+
+ return self.yaml_dict
+
+ def get(self, key):
+ ''' get a specified key'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
+ except KeyError:
+ entry = None
+
+ return entry
+
+ def pop(self, path, key_or_item):
+ ''' remove a key, value pair from a dict or an item for a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ if isinstance(entry, dict):
+ # pylint: disable=no-member,maybe-no-member
+ if key_or_item in entry:
+ entry.pop(key_or_item)
+ return (True, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # pylint: disable=no-member,maybe-no-member
+ ind = None
+ try:
+ ind = entry.index(key_or_item)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ entry.pop(ind)
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ def delete(self, path):
+ ''' remove path from a dict'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ return (True, self.yaml_dict)
+
+ def exists(self, path, value):
+ ''' check if value exists at path'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, list):
+ if value in entry:
+ return True
+ return False
+
+ elif isinstance(entry, dict):
+ if isinstance(value, dict):
+ rval = False
+ for key, val in value.items():
+ if entry[key] != val:
+ rval = False
+ break
+ else:
+ rval = True
+ return rval
+
+ return value in entry
+
+ return entry == value
+
+ def append(self, path, value):
+ '''append value to a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ self.put(path, [])
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ if not isinstance(entry, list):
+ return (False, self.yaml_dict)
+
+ # pylint: disable=no-member,maybe-no-member
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # pylint: disable=too-many-arguments
+ def update(self, path, value, index=None, curr_value=None):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, dict):
+ # pylint: disable=no-member,maybe-no-member
+ if not isinstance(value, dict):
+ raise YeditException('Cannot replace key, value entry in ' +
+ 'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
+
+ entry.update(value)
+ return (True, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # pylint: disable=no-member,maybe-no-member
+ ind = None
+ if curr_value:
+ try:
+ ind = entry.index(curr_value)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ elif index is not None:
+ ind = index
+
+ if ind is not None and entry[ind] != value:
+ entry[ind] = value
+ return (True, self.yaml_dict)
+
+ # see if it exists in the list
+ try:
+ ind = entry.index(value)
+ except ValueError:
+ # doesn't exist, append it
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # already exists, return
+ if ind is not None:
+ return (False, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ def put(self, path, value):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry == value:
+ return (False, self.yaml_dict)
+
+ # deepcopy didn't work
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ tmp_copy.fa.set_block_style()
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ self.yaml_dict = tmp_copy
+
+ return (True, self.yaml_dict)
+
+ def create(self, path, value):
+ ''' create a yaml file '''
+ if not self.file_exists():
+ # deepcopy didn't work
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), # noqa: E501
+ yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ tmp_copy.fa.set_block_style()
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if result:
+ self.yaml_dict = tmp_copy
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ @staticmethod
+ def get_curr_value(invalue, val_type):
+ '''return the current value'''
+ if invalue is None:
+ return None
+
+ curr_value = invalue
+ if val_type == 'yaml':
+ curr_value = yaml.load(invalue)
+ elif val_type == 'json':
+ curr_value = json.loads(invalue)
+
+ return curr_value
+
+ @staticmethod
+ def parse_value(inc_value, vtype=''):
+ '''determine value type passed'''
+ true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
+ 'on', 'On', 'ON', ]
+ false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
+ 'off', 'Off', 'OFF']
+
+ # The value came in as a string. If a bool was requested, validate it
+ # against the accepted spellings above; the yaml.load below performs the
+ # actual conversion.
+ if isinstance(inc_value, str) and 'bool' in vtype:
+ if inc_value not in true_bools and inc_value not in false_bools:
+ raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
+ % (inc_value, vtype))
+ elif isinstance(inc_value, bool) and 'str' in vtype:
+ inc_value = str(inc_value)
+
+ # If vtype is not str then go ahead and attempt to yaml load it.
+ if isinstance(inc_value, str) and 'str' not in vtype:
+ try:
+ inc_value = yaml.load(inc_value)
+ except Exception:
+ raise YeditException('Could not determine type of incoming ' +
+ 'value. value=[%s] vtype=[%s]'
+ % (type(inc_value), vtype))
+
+ return inc_value
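+ # Illustrative conversions of hypothetical inputs:
+ # Yedit.parse_value('2') -> 2 (yaml-loaded into an int)
+ # Yedit.parse_value('True', 'str') -> 'True' (kept as a string)
+ # Yedit.parse_value('maybe', 'bool') -> raises YeditException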
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(module):
+ '''perform the idempotent crud operations'''
+ yamlfile = Yedit(filename=module.params['src'],
+ backup=module.params['backup'],
+ separator=module.params['separator'])
+
+ if module.params['src']:
+ rval = yamlfile.load()
+
+ if yamlfile.yaml_dict is None and \
+ module.params['state'] != 'present':
+ return {'failed': True,
+ 'msg': ('Error opening file [%s]. Verify that the '
+ 'file exists, that it has the correct '
+ 'permissions, and is valid yaml.') % module.params['src']}
+
+ if module.params['state'] == 'list':
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['key']:
+ rval = yamlfile.get(module.params['key']) or {}
+
+ return {'changed': False, 'result': rval, 'state': "list"}
+
+ elif module.params['state'] == 'absent':
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['update']:
+ rval = yamlfile.pop(module.params['key'],
+ module.params['value'])
+ else:
+ rval = yamlfile.delete(module.params['key'])
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
+
+ elif module.params['state'] == 'present':
+ # check if content is different than what is in the file
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+
+ # We had no edits to make and the contents are the same
+ if yamlfile.yaml_dict == content and \
+ module.params['value'] is None:
+ return {'changed': False,
+ 'result': yamlfile.yaml_dict,
+ 'state': "present"}
+
+ yamlfile.yaml_dict = content
+
+ # we were passed a value; parse it
+ if module.params['value']:
+ value = Yedit.parse_value(module.params['value'],
+ module.params['value_type'])
+ key = module.params['key']
+ if module.params['update']:
+ # pylint: disable=line-too-long
+ curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']), # noqa: E501
+ module.params['curr_value_format']) # noqa: E501
+
+ rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
+
+ elif module.params['append']:
+ rval = yamlfile.append(key, value)
+ else:
+ rval = yamlfile.put(key, value)
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0],
+ 'result': rval[1], 'state': "present"}
+
+ # no edits to make
+ if module.params['src']:
+ # pylint: disable=redefined-variable-type
+ rval = yamlfile.write()
+ return {'changed': rval[0],
+ 'result': rval[1],
+ 'state': "present"}
+
+ return {'failed': True, 'msg': 'Unknown state passed'}
+# pylint: disable=too-many-lines
+# noqa: E301,E302,E303,T001
+
+
+class OpenShiftCLIError(Exception):
+ '''Exception class for openshiftcli'''
+ pass
+
+
+# pylint: disable=too-few-public-methods
+class OpenShiftCLI(object):
+ ''' Class to wrap the command line tools '''
+ def __init__(self,
+ namespace,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False,
+ all_namespaces=False):
+ ''' Constructor for OpenshiftCLI '''
+ self.namespace = namespace
+ self.verbose = verbose
+ self.kubeconfig = kubeconfig
+ self.all_namespaces = all_namespaces
+
+ # Pylint allows only 5 arguments to be passed.
+ # pylint: disable=too-many-arguments
+ def _replace_content(self, resource, rname, content, force=False, sep='.'):
+ ''' replace the current object with the content '''
+ res = self._get(resource, rname)
+ if not res['results']:
+ return res
+
+ fname = '/tmp/%s' % rname
+ yed = Yedit(fname, res['results'][0], separator=sep)
+ changes = []
+ for key, value in content.items():
+ changes.append(yed.put(key, value))
+
+ if any([change[0] for change in changes]):
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._replace(fname, force)
+
+ return {'returncode': 0, 'updated': False}
+
+ def _replace(self, fname, force=False):
+ '''replace the current object with oc replace'''
+ cmd = ['replace', '-f', fname]
+ if force:
+ cmd.append('--force')
+ return self.openshift_cmd(cmd)
+
+ def _create_from_content(self, rname, content):
+ '''create a temporary file and then call oc create on it'''
+ fname = '/tmp/%s' % rname
+ yed = Yedit(fname, content=content)
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._create(fname)
+
+ def _create(self, fname):
+ '''call oc create on a filename'''
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _delete(self, resource, rname, selector=None):
+ '''call oc delete on a resource'''
+ cmd = ['delete', resource, rname]
+ if selector:
+ cmd.append('--selector=%s' % selector)
+
+ return self.openshift_cmd(cmd)
+
+ def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
+ '''process a template
+
+ template_name: the name of the template to process
+ create: whether to send to oc create after processing
+ params: the parameters for the template
+ template_data: the template content, passed on stdin instead of a file
+ '''
+ cmd = ['process']
+ if template_data:
+ cmd.extend(['-f', '-'])
+ else:
+ cmd.append(template_name)
+ if params:
+ param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ cmd.append('-v')
+ cmd.extend(param_str)
+
+ results = self.openshift_cmd(cmd, output=True, input_data=template_data)
+
+ if results['returncode'] != 0 or not create:
+ return results
+
+ fname = '/tmp/%s' % template_name
+ yed = Yedit(fname, results['results'])
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _get(self, resource, rname=None, selector=None):
+ '''return a resource by name '''
+ cmd = ['get', resource]
+ if selector:
+ cmd.append('--selector=%s' % selector)
+
+ cmd.extend(['-o', 'json'])
+
+ if rname:
+ cmd.append(rname)
+
+ rval = self.openshift_cmd(cmd, output=True)
+
+ # Ensure results are returned as a list
+ if 'items' in rval:
+ rval['results'] = rval['items']
+ elif not isinstance(rval['results'], list):
+ rval['results'] = [rval['results']]
+
+ return rval
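+ # Illustrative return value for a single named resource (hypothetical):
+ # {'returncode': 0,
+ # 'results': [{'kind': 'Route', 'metadata': {...}, 'spec': {...}}],
+ # 'cmd': '/usr/bin/oc -n default get route -o json myroute'}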
+
+ def _schedulable(self, node=None, selector=None, schedulable=True):
+ ''' perform oadm manage-node --schedulable '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ cmd.append('--schedulable=%s' % schedulable)
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
+
+ def _list_pods(self, node=None, selector=None, pod_selector=None):
+ ''' perform oadm list pods
+
+ node: the node in which to list pods
+ selector: the label selector filter if provided
+ pod_selector: the pod selector filter if provided
+ '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ if pod_selector:
+ cmd.append('--pod-selector=%s' % pod_selector)
+
+ cmd.extend(['--list-pods', '-o', 'json'])
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ # pylint: disable=too-many-arguments
+ def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
+ ''' perform oadm manage-node evacuate '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ if dry_run:
+ cmd.append('--dry-run')
+
+ if pod_selector:
+ cmd.append('--pod-selector=%s' % pod_selector)
+
+ if grace_period:
+ cmd.append('--grace-period=%s' % int(grace_period))
+
+ if force:
+ cmd.append('--force')
+
+ cmd.append('--evacuate')
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ def _version(self):
+ ''' return the openshift version'''
+ return self.openshift_cmd(['version'], output=True, output_type='raw')
+
+ def _import_image(self, url=None, name=None, tag=None):
+ ''' perform image import '''
+ cmd = ['import-image']
+
+ image = '{0}'.format(name)
+ if tag:
+ image += ':{0}'.format(tag)
+
+ cmd.append(image)
+
+ if url:
+ cmd.append('--from={0}/{1}'.format(url, image))
+
+ cmd.append('-n{0}'.format(self.namespace))
+
+ cmd.append('--confirm')
+ return self.openshift_cmd(cmd)
+
+ # pylint: disable=too-many-arguments,too-many-branches
+ def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
+ '''Base command for oc '''
+ cmds = []
+ if oadm:
+ cmds = ['/usr/bin/oadm']
+ else:
+ cmds = ['/usr/bin/oc']
+
+ if self.all_namespaces:
+ cmds.extend(['--all-namespaces'])
+ elif self.namespace:
+ cmds.extend(['-n', self.namespace])
+
+ cmds.extend(cmd)
+
+ rval = {}
+ results = ''
+ err = None
+
+ if self.verbose:
+ print(' '.join(cmds))
+
+ proc = subprocess.Popen(cmds,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env={'KUBECONFIG': self.kubeconfig})
+
+ stdout, stderr = proc.communicate(input_data)
+ rval = {"returncode": proc.returncode,
+ "results": results,
+ "cmd": ' '.join(cmds)}
+
+ if proc.returncode == 0:
+ if output:
+ if output_type == 'json':
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as err:
+ if "No JSON object could be decoded" in err.args:
+ err = err.args
+ elif output_type == 'raw':
+ rval['results'] = stdout
+
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if err:
+ rval.update({"err": err,
+ "stderr": stderr,
+ "stdout": stdout,
+ "cmd": cmds})
+
+ else:
+ rval.update({"stderr": stderr,
+ "stdout": stdout,
+ "results": {}})
+
+ return rval
+
+
+class Utils(object):
+ ''' utilities for openshiftcli modules '''
+ @staticmethod
+ def create_file(rname, data, ftype='yaml'):
+ ''' create a file in tmp with name and contents'''
+ path = os.path.join('/tmp', rname)
+ with open(path, 'w') as fds:
+ if ftype == 'yaml':
+ fds.write(yaml.dump(data, Dumper=yaml.RoundTripDumper))
+
+ elif ftype == 'json':
+ fds.write(json.dumps(data))
+ else:
+ fds.write(data)
+
+ # Register cleanup when module is done
+ atexit.register(Utils.cleanup, [path])
+ return path
+
+ @staticmethod
+ def create_files_from_contents(content, content_type=None):
+ '''Turn a list of dicts with 'path' and 'data' keys into created temp files'''
+ if not isinstance(content, list):
+ content = [content]
+ files = []
+ for item in content:
+ path = Utils.create_file(item['path'], item['data'], ftype=content_type)
+ files.append({'name': os.path.basename(path), 'path': path})
+ return files
+
+ @staticmethod
+ def cleanup(files):
+ '''Clean up on exit '''
+ for sfile in files:
+ if os.path.exists(sfile):
+ if os.path.isdir(sfile):
+ shutil.rmtree(sfile)
+ elif os.path.isfile(sfile):
+ os.remove(sfile)
+
+ @staticmethod
+ def exists(results, _name):
+ ''' Check to see if the results include the name '''
+ if not results:
+ return False
+
+ if Utils.find_result(results, _name):
+ return True
+
+ return False
+
+ @staticmethod
+ def find_result(results, _name):
+ ''' Find the specified result by name'''
+ rval = None
+ for result in results:
+ if 'metadata' in result and result['metadata']['name'] == _name:
+ rval = result
+ break
+
+ return rval
+
+ @staticmethod
+ def get_resource_file(sfile, sfile_type='yaml'):
+ ''' return the service file '''
+ contents = None
+ with open(sfile) as sfd:
+ contents = sfd.read()
+
+ if sfile_type == 'yaml':
+ contents = yaml.load(contents, yaml.RoundTripLoader)
+ elif sfile_type == 'json':
+ contents = json.loads(contents)
+
+ return contents
+
+ @staticmethod
+ def filter_versions(stdout):
+ ''' filter the oc version output '''
+
+ version_dict = {}
+ version_search = ['oc', 'openshift', 'kubernetes']
+
+ for line in stdout.strip().split('\n'):
+ for term in version_search:
+ if not line:
+ continue
+ if line.startswith(term):
+ version_dict[term] = line.split()[-1]
+
+ # Workaround: by default `oc version` in OpenShift 3.2 does not report an
+ # "openshift" version, so fall back to the "oc" version.
+ if "openshift" not in version_dict:
+ version_dict["openshift"] = version_dict["oc"]
+
+ return version_dict
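+ # Illustrative parse of a hypothetical `oc version` output:
+ # "oc v3.4.0.39\nkubernetes v1.4.0+776c994\nfeatures: Basic-Auth"
+ # -> {'oc': 'v3.4.0.39', 'kubernetes': 'v1.4.0+776c994',
+ # 'openshift': 'v3.4.0.39'} # openshift backfilled from 'oc'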
+
+ @staticmethod
+ def add_custom_versions(versions):
+ ''' create custom versions strings '''
+
+ versions_dict = {}
+
+ for tech, version in versions.items():
+ # clean up "-" from version
+ if "-" in version:
+ version = version.split("-")[0]
+
+ if version.startswith('v'):
+ versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
+ # "v3.3.0.33" is what we have, we want "3.3"
+ versions_dict[tech + '_short'] = version[1:4]
+
+ return versions_dict
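+ # Illustrative expansion of a hypothetical version string:
+ # {'oc': 'v3.4.0.39-1.git.0.5f32f06.el7'}
+ # -> {'oc_numeric': '3.4.0.39', 'oc_short': '3.4'}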
+
+ @staticmethod
+ def openshift_installed():
+ ''' check if openshift is installed '''
+ import yum
+
+ yum_base = yum.YumBase()
+ if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
+ return True
+
+ return False
+
+ # Disabling too-many-branches. This is a yaml dictionary comparison function
+ # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
+ @staticmethod
+ def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
+ ''' Given a user defined definition, compare it with the results given back by our query. '''
+
+ # Currently these values are autogenerated and we do not need to check them
+ skip = ['metadata', 'status']
+ if skip_keys:
+ skip.extend(skip_keys)
+
+ for key, value in result_def.items():
+ if key in skip:
+ continue
+
+ # Both are lists
+ if isinstance(value, list):
+ if key not in user_def:
+ if debug:
+ print('User data does not have key [%s]' % key)
+ print('User data: %s' % user_def)
+ return False
+
+ if not isinstance(user_def[key], list):
+ if debug:
+ print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
+ return False
+
+ if len(user_def[key]) != len(value):
+ if debug:
+ print("List lengths are not equal.")
+ print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
+ print("user_def: %s" % user_def[key])
+ print("value: %s" % value)
+ return False
+
+ for values in zip(user_def[key], value):
+ if isinstance(values[0], dict) and isinstance(values[1], dict):
+ if debug:
+ print('sending list - list')
+ print(type(values[0]))
+ print(type(values[1]))
+ result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
+ if not result:
+ print('list compare returned false')
+ return False
+
+ elif value != user_def[key]:
+ if debug:
+ print('value should be identical')
+ print(value)
+ print(user_def[key])
+ return False
+
+ # recurse on a dictionary
+ elif isinstance(value, dict):
+ if key not in user_def:
+ if debug:
+ print("user_def does not have key [%s]" % key)
+ return False
+ if not isinstance(user_def[key], dict):
+ if debug:
+ print("dict returned false: not instance of dict")
+ return False
+
+ # before passing ensure keys match
+ api_values = set(value.keys()) - set(skip)
+ user_values = set(user_def[key].keys()) - set(skip)
+ if api_values != user_values:
+ if debug:
+ print("keys are not equal in dict")
+ print(api_values)
+ print(user_values)
+ return False
+
+ result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
+ if not result:
+ if debug:
+ print("dict returned false")
+ print(result)
+ return False
+
+ # Verify each key, value pair is the same
+ else:
+ if key not in user_def or value != user_def[key]:
+ if debug:
+ print("value not equal; user_def does not have key")
+ print(key)
+ print(value)
+ if key in user_def:
+ print(user_def[key])
+ return False
+
+ if debug:
+ print('returning true')
+ return True
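+ # Illustrative comparison of hypothetical definitions; 'metadata' and
+ # 'status' on the API side are skipped, so this returns True:
+ # user_def = {'spec': {'host': 'app.example.com'}}
+ # result_def = {'spec': {'host': 'app.example.com'},
+ # 'metadata': {'uid': 'abc'}, 'status': {}}
+ # Utils.check_def_equal(user_def, result_def) -> True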
+
+
+class OpenShiftCLIConfig(object):
+ '''Generic Config'''
+ def __init__(self, rname, namespace, kubeconfig, options):
+ self.kubeconfig = kubeconfig
+ self.name = rname
+ self.namespace = namespace
+ self._options = options
+
+ @property
+ def config_options(self):
+ ''' return config options '''
+ return self._options
+
+ def to_option_list(self):
+ '''return all options as a string'''
+ return self.stringify()
+
+ def stringify(self):
+ ''' return the options hash as cli params in a string '''
+ rval = []
+ for key, data in self.config_options.items():
+ if data['include'] \
+ and (data['value'] or isinstance(data['value'], int)):
+ rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
+
+ return rval
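+ # Illustrative conversion of hypothetical options; an integer 0 is still
+ # emitted even though it is falsy (order may vary):
+ # {'replica_count': {'value': 0, 'include': True},
+ # 'service_name': {'value': 'router', 'include': True},
+ # 'labels': {'value': None, 'include': True}}
+ # -> ['--replica-count=0', '--service-name=router']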
+
+
+
+# pylint: disable=too-many-instance-attributes
+class OCVersion(OpenShiftCLI):
+ ''' Class to wrap the oc command line tools '''
+ # pylint allows 5
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ config,
+ debug):
+ ''' Constructor for OCVersion '''
+ super(OCVersion, self).__init__(None, config)
+ self.debug = debug
+
+ def get(self):
+ '''get and return version information '''
+
+ results = {}
+
+ version_results = self._version()
+
+ if version_results['returncode'] == 0:
+ filtered_vers = Utils.filter_versions(version_results['results'])
+ custom_vers = Utils.add_custom_versions(filtered_vers)
+
+ results['returncode'] = version_results['returncode']
+ results.update(filtered_vers)
+ results.update(custom_vers)
+
+ return results
+
+ raise OpenShiftCLIError('Problem detecting openshift version.')
+
+ @staticmethod
+ def run_ansible(params):
+ '''run the idempotent ansible code'''
+ oc_version = OCVersion(params['kubeconfig'], params['debug'])
+
+ if params['state'] == 'list':
+
+ #pylint: disable=protected-access
+ result = oc_version.get()
+ return {'state': params['state'],
+ 'results': result,
+ 'changed': False}
+
+def main():
+ ''' ansible oc module for version '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ state=dict(default='list', type='str',
+ choices=['list']),
+ debug=dict(default=False, type='bool'),
+ ),
+ supports_check_mode=True,
+ )
+
+ rval = OCVersion.run_ansible(module.params)
+ if 'failed' in rval:
+ module.fail_json(**rval)
+
+
+ module.exit_json(**rval)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/lib_openshift/src/ansible/oc_edit.py b/roles/lib_openshift/src/ansible/oc_edit.py
new file mode 100644
index 000000000..5c5954747
--- /dev/null
+++ b/roles/lib_openshift/src/ansible/oc_edit.py
@@ -0,0 +1,48 @@
+# pylint: skip-file
+# flake8: noqa
+
+
+def main():
+ '''
+ ansible oc module for editing objects
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ state=dict(default='present', type='str',
+ choices=['present']),
+ debug=dict(default=False, type='bool'),
+ namespace=dict(default='default', type='str'),
+ name=dict(default=None, required=True, type='str'),
+ kind=dict(required=True,
+ type='str',
+ choices=['dc', 'deploymentconfig',
+ 'rc', 'replicationcontroller',
+ 'svc', 'service',
+ 'scc', 'securitycontextconstraints',
+ 'ns', 'namespace', 'project', 'projects',
+ 'is', 'imagestream',
+ 'istag', 'imagestreamtag',
+ 'bc', 'buildconfig',
+ 'routes',
+ 'node',
+ 'secret',
+ 'pv', 'persistentvolume']),
+ file_name=dict(default=None, type='str'),
+ file_format=dict(default='yaml', type='str'),
+ content=dict(default=None, required=True, type='dict'),
+ force=dict(default=False, type='bool'),
+ separator=dict(default='.', type='str'),
+ ),
+ supports_check_mode=True,
+ )
+
+ rval = Edit.run_ansible(module.params, module.check_mode)
+ if 'failed' in rval:
+ module.fail_json(**rval)
+
+ module.exit_json(**rval)
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/lib_openshift/src/ansible/oc_obj.py b/roles/lib_openshift/src/ansible/oc_obj.py
new file mode 100644
index 000000000..701740e4f
--- /dev/null
+++ b/roles/lib_openshift/src/ansible/oc_obj.py
@@ -0,0 +1,37 @@
+# pylint: skip-file
+# flake8: noqa
+
+# pylint: disable=too-many-branches
+def main():
+ '''
+ ansible oc module for services
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ state=dict(default='present', type='str',
+ choices=['present', 'absent', 'list']),
+ debug=dict(default=False, type='bool'),
+ namespace=dict(default='default', type='str'),
+ all_namespaces=dict(default=False, type='bool'),
+ name=dict(default=None, type='str'),
+ files=dict(default=None, type='list'),
+ kind=dict(required=True, type='str'),
+ delete_after=dict(default=False, type='bool'),
+ content=dict(default=None, type='dict'),
+ force=dict(default=False, type='bool'),
+ selector=dict(default=None, type='str'),
+ ),
+ mutually_exclusive=[["content", "files"]],
+
+ supports_check_mode=True,
+ )
+ rval = OCObject.run_ansible(module.params, module.check_mode)
+ if 'failed' in rval:
+ module.fail_json(**rval)
+
+ module.exit_json(**rval)
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/lib_openshift/src/ansible/oc_route.py b/roles/lib_openshift/src/ansible/oc_route.py
new file mode 100644
index 000000000..c87e6738f
--- /dev/null
+++ b/roles/lib_openshift/src/ansible/oc_route.py
@@ -0,0 +1,84 @@
+# pylint: skip-file
+# flake8: noqa
+
+
+def get_cert_data(path, content):
+ '''get the data for a particular value'''
+ if not path and not content:
+ return None
+
+ rval = None
+ if path and os.path.exists(path) and os.access(path, os.R_OK):
+ rval = open(path).read()
+ elif content:
+ rval = content
+
+ return rval
+
+
+# pylint: disable=too-many-branches
+def main():
+ '''
+ ansible oc module for route
+ '''
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ state=dict(default='present', type='str',
+ choices=['present', 'absent', 'list']),
+ debug=dict(default=False, type='bool'),
+ name=dict(default=None, required=True, type='str'),
+ namespace=dict(default=None, required=True, type='str'),
+ tls_termination=dict(default=None, type='str'),
+ dest_cacert_path=dict(default=None, type='str'),
+ cacert_path=dict(default=None, type='str'),
+ cert_path=dict(default=None, type='str'),
+ key_path=dict(default=None, type='str'),
+ dest_cacert_content=dict(default=None, type='str'),
+ cacert_content=dict(default=None, type='str'),
+ cert_content=dict(default=None, type='str'),
+ key_content=dict(default=None, type='str'),
+ service_name=dict(default=None, type='str'),
+ host=dict(default=None, type='str'),
+ wildcard_policy=dict(default=None, type='str'),
+ weight=dict(default=None, type='int'),
+ ),
+ mutually_exclusive=[('dest_cacert_path', 'dest_cacert_content'),
+ ('cacert_path', 'cacert_content'),
+ ('cert_path', 'cert_content'),
+ ('key_path', 'key_content'), ],
+ supports_check_mode=True,
+ )
+ files = {'destcacert': {'path': module.params['dest_cacert_path'],
+ 'content': module.params['dest_cacert_content'],
+ 'value': None, },
+ 'cacert': {'path': module.params['cacert_path'],
+ 'content': module.params['cacert_content'],
+ 'value': None, },
+ 'cert': {'path': module.params['cert_path'],
+ 'content': module.params['cert_content'],
+ 'value': None, },
+ 'key': {'path': module.params['key_path'],
+ 'content': module.params['key_content'],
+ 'value': None, }, }
+
+ if module.params['tls_termination']:
+ for key, option in files.items():
+ if key == 'destcacert' and module.params['tls_termination'] != 'reencrypt':
+ continue
+
+ option['value'] = get_cert_data(option['path'], option['content'])
+
+ if not option['value']:
+ module.fail_json(msg='Verify that you pass a value for %s' % key)
+
+ results = OCRoute.run_ansible(module.params, files, module.check_mode)
+
+ if 'failed' in results:
+ module.fail_json(**results)
+
+ module.exit_json(**results)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/lib_openshift/src/ansible/oc_version.py b/roles/lib_openshift/src/ansible/oc_version.py
new file mode 100644
index 000000000..57ef849ca
--- /dev/null
+++ b/roles/lib_openshift/src/ansible/oc_version.py
@@ -0,0 +1,26 @@
+# pylint: skip-file
+# flake8: noqa
+
+def main():
+ ''' ansible oc module for version '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ state=dict(default='list', type='str',
+ choices=['list']),
+ debug=dict(default=False, type='bool'),
+ ),
+ supports_check_mode=True,
+ )
+
+ rval = OCVersion.run_ansible(module.params)
+ if 'failed' in rval:
+ module.fail_json(**rval)
+
+
+ module.exit_json(**rval)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/lib_openshift/src/class/oc_edit.py b/roles/lib_openshift/src/class/oc_edit.py
new file mode 100644
index 000000000..0734e2085
--- /dev/null
+++ b/roles/lib_openshift/src/class/oc_edit.py
@@ -0,0 +1,94 @@
+# pylint: skip-file
+# flake8: noqa
+
+class Edit(OpenShiftCLI):
+ ''' Class to wrap the oc command line tools
+ '''
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ kind,
+ namespace,
+ resource_name=None,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ separator='.',
+ verbose=False):
+ ''' Constructor for Edit '''
+ super(Edit, self).__init__(namespace, kubeconfig)
+ self.namespace = namespace
+ self.kind = kind
+ self.name = resource_name
+ self.kubeconfig = kubeconfig
+ self.separator = separator
+ self.verbose = verbose
+
+ def get(self):
+ '''return a secret by name '''
+ return self._get(self.kind, self.name)
+
+ def update(self, file_name, content, force=False, content_type='yaml'):
+ '''run update '''
+ if file_name:
+ if content_type == 'yaml':
+ data = yaml.load(open(file_name))
+ elif content_type == 'json':
+ data = json.loads(open(file_name).read())
+
+ changes = []
+ yed = Yedit(filename=file_name, content=data, separator=self.separator)
+ for key, value in content.items():
+ changes.append(yed.put(key, value))
+
+ if any([not change[0] for change in changes]):
+ return {'returncode': 0, 'updated': False}
+
+ yed.write()
+
+ atexit.register(Utils.cleanup, [file_name])
+
+ return self._replace(file_name, force=force)
+
+ return self._replace_content(self.kind, self.name, content, force=force, sep=self.separator)
+
+ @staticmethod
+ def run_ansible(params, check_mode):
+ '''run the ansible idempotent code'''
+
+ ocedit = Edit(params['kind'],
+ params['namespace'],
+ params['name'],
+ kubeconfig=params['kubeconfig'],
+ separator=params['separator'],
+ verbose=params['debug'])
+
+ api_rval = ocedit.get()
+
+ ########
+ # Create
+ ########
+ if not Utils.exists(api_rval['results'], params['name']):
+ return {"failed": True, 'msg': api_rval}
+
+ ########
+ # Update
+ ########
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed edit'}
+
+ api_rval = ocedit.update(params['file_name'],
+ params['content'],
+ params['force'],
+ params['file_format'])
+
+ if api_rval['returncode'] != 0:
+ return {"failed": True, 'msg': api_rval}
+
+ if 'updated' in api_rval and not api_rval['updated']:
+ return {"changed": False, 'results': api_rval, 'state': 'present'}
+
+ # return the created object
+ api_rval = ocedit.get()
+
+ if api_rval['returncode'] != 0:
+ return {"failed": True, 'msg': api_rval}
+
+ return {"changed": True, 'results': api_rval, 'state': 'present'}
diff --git a/roles/lib_openshift/src/class/oc_obj.py b/roles/lib_openshift/src/class/oc_obj.py
new file mode 100644
index 000000000..9d0b8e45b
--- /dev/null
+++ b/roles/lib_openshift/src/class/oc_obj.py
@@ -0,0 +1,193 @@
+# pylint: skip-file
+# flake8: noqa
+
+# pylint: disable=too-many-instance-attributes
+class OCObject(OpenShiftCLI):
+ ''' Class to wrap the oc command line tools '''
+
+ # pylint allows 5. we need 6
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ kind,
+ namespace,
+ rname=None,
+ selector=None,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False,
+ all_namespaces=False):
+ ''' Constructor for OCObject '''
+ super(OCObject, self).__init__(namespace, kubeconfig,
+ all_namespaces=all_namespaces)
+ self.kind = kind
+ self.namespace = namespace
+ self.name = rname
+ self.selector = selector
+ self.kubeconfig = kubeconfig
+ self.verbose = verbose
+
+ def get(self):
+ '''return a kind by name '''
+ results = self._get(self.kind, rname=self.name, selector=self.selector)
+ if results['returncode'] != 0 and 'stderr' in results and \
+ '\"%s\" not found' % self.name in results['stderr']:
+ results['returncode'] = 0
+
+ return results
+
+ def delete(self):
+ '''delete the named object'''
+ return self._delete(self.kind, self.name)
+
+ def create(self, files=None, content=None):
+ '''
+ Create a config
+
+ NOTE: This creates the first file OR the first content.
+ TODO: Handle all files and content passed in
+ '''
+ if files:
+ return self._create(files[0])
+
+ content['data'] = yaml.dump(content['data'])
+ content_file = Utils.create_files_from_contents(content)[0]
+
+ return self._create(content_file['path'])
+
+ # pylint: disable=too-many-function-args
+ def update(self, files=None, content=None, force=False):
+ '''update a current openshift object
+
+ This receives a list of file names or content
+ and takes the first and calls replace.
+
+ TODO: take an entire list
+ '''
+ if files:
+ return self._replace(files[0], force)
+
+ if content and 'data' in content:
+ content = content['data']
+
+ return self.update_content(content, force)
+
+ def update_content(self, content, force=False):
+ '''update an object through using the content param'''
+ return self._replace_content(self.kind, self.name, content, force=force)
+
+ def needs_update(self, files=None, content=None, content_type='yaml'):
+ ''' check to see if we need to update '''
+ objects = self.get()
+ if objects['returncode'] != 0:
+ return objects
+
+ # pylint: disable=no-member
+ data = None
+ if files:
+ data = Utils.get_resource_file(files[0], content_type)
+ elif content and 'data' in content:
+ data = content['data']
+ else:
+ data = content
+
+ # if equal then no need. So not equal is True
+ return not Utils.check_def_equal(data, objects['results'][0], skip_keys=None, debug=False)
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(params, check_mode=False):
+ '''perform the ansible idempotent code'''
+
+ ocobj = OCObject(params['kind'],
+ params['namespace'],
+ params['name'],
+ params['selector'],
+ kubeconfig=params['kubeconfig'],
+ verbose=params['debug'],
+ all_namespaces=params['all_namespaces'])
+
+ state = params['state']
+
+ api_rval = ocobj.get()
+
+ #####
+ # Get
+ #####
+ if state == 'list':
+ return {'changed': False, 'results': api_rval, 'state': 'list'}
+
+ if not params['name']:
+ return {'failed': True, 'msg': 'Please specify a name when state is absent|present.'} # noqa: E501
+
+ ########
+ # Delete
+ ########
+ if state == 'absent':
+ if not Utils.exists(api_rval['results'], params['name']):
+ return {'changed': False, 'state': 'absent'}
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a delete'}
+
+ api_rval = ocobj.delete()
+
+ return {'changed': True, 'results': api_rval, 'state': 'absent'}
+
+ if state == 'present':
+ ########
+ # Create
+ ########
+ if not Utils.exists(api_rval['results'], params['name']):
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create'}
+
+ # Create it here
+ api_rval = ocobj.create(params['files'], params['content'])
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ # return the created object
+ api_rval = ocobj.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ # Remove files
+ if params['files'] and params['delete_after']:
+ Utils.cleanup(params['files'])
+
+ return {'changed': True, 'results': api_rval, 'state': "present"}
+
+ ########
+ # Update
+ ########
+ # if a file path is passed, use it.
+ update = ocobj.needs_update(params['files'], params['content'])
+ if not isinstance(update, bool):
+ return {'failed': True, 'msg': update}
+
+ # No changes
+ if not update:
+ if params['files'] and params['delete_after']:
+ Utils.cleanup(params['files'])
+
+ return {'changed': False, 'results': api_rval['results'][0], 'state': "present"}
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed an update.'}
+
+ api_rval = ocobj.update(params['files'],
+ params['content'],
+ params['force'])
+
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ # return the created object
+ api_rval = ocobj.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval}
+
+ return {'changed': True, 'results': api_rval, 'state': "present"}
diff --git a/roles/lib_openshift/src/class/oc_route.py b/roles/lib_openshift/src/class/oc_route.py
new file mode 100644
index 000000000..42af2c01c
--- /dev/null
+++ b/roles/lib_openshift/src/class/oc_route.py
@@ -0,0 +1,170 @@
+# pylint: skip-file
+# flake8: noqa
+
+
+# pylint: disable=too-many-instance-attributes
+class OCRoute(OpenShiftCLI):
+ ''' Class to wrap the oc command line tools '''
+ kind = 'route'
+
+ def __init__(self,
+ config,
+ verbose=False):
+ ''' Constructor for OCRoute '''
+ super(OCRoute, self).__init__(config.namespace, config.kubeconfig)
+ self.config = config
+ self.namespace = config.namespace
+ self._route = None
+
+ @property
+ def route(self):
+ ''' property function for route'''
+ if not self._route:
+ self.get()
+ return self._route
+
+ @route.setter
+ def route(self, data):
+ ''' setter function for route '''
+ self._route = data
+
+ def exists(self):
+ ''' return whether a route exists '''
+ if self.route:
+ return True
+
+ return False
+
+ def get(self):
+ '''return route information '''
+ result = self._get(self.kind, self.config.name)
+ if result['returncode'] == 0:
+ self.route = Route(content=result['results'][0])
+ elif 'routes \"%s\" not found' % self.config.name in result['stderr']:
+ result['returncode'] = 0
+ result['results'] = [{}]
+
+ return result
+
+ def delete(self):
+ '''delete the object'''
+ return self._delete(self.kind, self.config.name)
+
+ def create(self):
+ '''create the object'''
+ return self._create_from_content(self.config.name, self.config.data)
+
+ def update(self):
+ '''update the object'''
+ # need to update the tls information and the service name
+ return self._replace_content(self.kind, self.config.name, self.config.data)
+
+ def needs_update(self):
+ ''' verify an update is needed '''
+ skip = []
+ return not Utils.check_def_equal(self.config.data, self.route.yaml_dict, skip_keys=skip, debug=True)
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(params, files, check_mode=False):
+ ''' run the idempotent ansible code
+
+ params comes from the ansible portion for this module
+ files: a dictionary for the certificates
+ {'cert': {'path': '',
+ 'content': '',
+ 'value': ''
+ }
+ }
+ check_mode: whether the module is running in check mode (module.check_mode)
+ '''
+
+ rconfig = RouteConfig(params['name'],
+ params['namespace'],
+ params['kubeconfig'],
+ files['destcacert']['value'],
+ files['cacert']['value'],
+ files['cert']['value'],
+ files['key']['value'],
+ params['host'],
+ params['tls_termination'],
+ params['service_name'],
+ params['wildcard_policy'],
+ params['weight'])
+
+ oc_route = OCRoute(rconfig, verbose=params['debug'])
+
+ state = params['state']
+
+ api_rval = oc_route.get()
+
+ #####
+ # Get
+ #####
+ if state == 'list':
+ return {'changed': False,
+ 'results': api_rval['results'],
+ 'state': 'list'}
+
+ ########
+ # Delete
+ ########
+ if state == 'absent':
+ if oc_route.exists():
+
+ if check_mode:
+ return {'changed': False, 'msg': 'CHECK_MODE: Would have performed a delete.'} # noqa: E501
+
+ api_rval = oc_route.delete()
+
+ return {'changed': True, 'results': api_rval, 'state': "absent"} # noqa: E501
+ return {'changed': False, 'state': 'absent'}
+
+ if state == 'present':
+ ########
+ # Create
+ ########
+ if not oc_route.exists():
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed a create.'} # noqa: E501
+
+ # Create it here
+ api_rval = oc_route.create()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval, 'state': "present"} # noqa: E501
+
+ # return the created object
+ api_rval = oc_route.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval, 'state': "present"} # noqa: E501
+
+ return {'changed': True, 'results': api_rval, 'state': "present"} # noqa: E501
+
+ ########
+ # Update
+ ########
+ if oc_route.needs_update():
+
+ if check_mode:
+ return {'changed': True, 'msg': 'CHECK_MODE: Would have performed an update.'} # noqa: E501
+
+ api_rval = oc_route.update()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval, 'state': "present"} # noqa: E501
+
+ # return the created object
+ api_rval = oc_route.get()
+
+ if api_rval['returncode'] != 0:
+ return {'failed': True, 'msg': api_rval, 'state': "present"} # noqa: E501
+
+ return {'changed': True, 'results': api_rval, 'state': "present"} # noqa: E501
+
+ return {'changed': False, 'results': api_rval, 'state': "present"}
+
+ # catch all
+ return {'failed': True, 'msg': "Unknown State passed"}
diff --git a/roles/lib_openshift/src/class/oc_version.py b/roles/lib_openshift/src/class/oc_version.py
new file mode 100644
index 000000000..7f8c721d8
--- /dev/null
+++ b/roles/lib_openshift/src/class/oc_version.py
@@ -0,0 +1,47 @@
+# flake8: noqa
+# pylint: skip-file
+
+
+# pylint: disable=too-many-instance-attributes
+class OCVersion(OpenShiftCLI):
+ ''' Class to wrap the oc command line tools '''
+ # pylint allows 5
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ config,
+ debug):
+ ''' Constructor for OCVersion '''
+ super(OCVersion, self).__init__(None, config)
+ self.debug = debug
+
+ def get(self):
+ '''get and return version information '''
+
+ results = {}
+
+ version_results = self._version()
+
+ if version_results['returncode'] == 0:
+ filtered_vers = Utils.filter_versions(version_results['results'])
+ custom_vers = Utils.add_custom_versions(filtered_vers)
+
+ results['returncode'] = version_results['returncode']
+ results.update(filtered_vers)
+ results.update(custom_vers)
+
+ return results
+
+ raise OpenShiftCLIError('Problem detecting openshift version.')
+
+ @staticmethod
+ def run_ansible(params):
+ '''run the idempotent ansible code'''
+ oc_version = OCVersion(params['kubeconfig'], params['debug'])
+
+ if params['state'] == 'list':
+
+ #pylint: disable=protected-access
+ result = oc_version.get()
+ return {'state': params['state'],
+ 'results': result,
+ 'changed': False}
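As a sanity check on the flow above, here is a simplified, standalone illustration of what the Utils.filter_versions() and Utils.add_custom_versions() helpers (defined later in lib/base.py) do to sample `oc version` output; the sample strings are taken from the unit test further down, and the real helpers additionally fall back to the `oc` value when no `openshift` line is present:

# Simplified illustration only; not the module's actual code path.
sample = '''oc v3.4.0.39
kubernetes v1.4.0+776c994
features: Basic-Auth GSSAPI Kerberos SPNEGO
'''

versions = {}
for line in sample.strip().split('\n'):
    for term in ('oc', 'openshift', 'kubernetes'):
        if line.startswith(term):
            versions[term] = line.split()[-1]

for tech, version in list(versions.items()):
    version = version.split('-')[0]
    if version.startswith('v'):
        versions[tech + '_numeric'] = version[1:].split('+')[0]
        versions[tech + '_short'] = version[1:4]

print(versions)
# roughly: {'oc': 'v3.4.0.39', 'kubernetes': 'v1.4.0+776c994',
#           'oc_numeric': '3.4.0.39', 'oc_short': '3.4',
#           'kubernetes_numeric': '1.4.0', 'kubernetes_short': '1.4'}
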
diff --git a/roles/lib_openshift/src/doc/edit b/roles/lib_openshift/src/doc/edit
new file mode 100644
index 000000000..212d88f65
--- /dev/null
+++ b/roles/lib_openshift/src/doc/edit
@@ -0,0 +1,116 @@
+# flake8: noqa
+# pylint: skip-file
+
+DOCUMENTATION = '''
+---
+module: oc_edit
+short_description: Modify and idempotently manage openshift objects.
+description:
+ - Modify openshift objects programmatically.
+options:
+ state:
+ description:
+ - Currently, present is the only supported state.
+ required: true
+ default: present
+ choices: ["present"]
+ aliases: []
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+ name:
+ description:
+ - Name of the object that is being queried.
+ required: false
+ default: None
+ aliases: []
+ namespace:
+ description:
+ - The namespace where the object lives.
+ required: false
+ default: str
+ aliases: []
+ kind:
+ description:
+ - The kind attribute of the object.
+ required: True
+ default: None
+ choices:
+ - bc
+ - buildconfig
+ - configmaps
+ - dc
+ - deploymentconfig
+ - imagestream
+ - imagestreamtag
+ - is
+ - istag
+ - namespace
+ - project
+ - projects
+ - node
+ - ns
+ - persistentvolume
+ - pv
+ - rc
+ - replicationcontroller
+ - routes
+ - scc
+ - secret
+ - securitycontextconstraints
+ - service
+ - svc
+ aliases: []
+ file_name:
+ description:
+ - The name of the file to edit.
+ required: false
+ default: None
+ aliases: []
+ file_format:
+ description:
+ - The format of the file being edited.
+ required: false
+ default: yaml
+ aliases: []
+ content:
+ description:
+ - Content of the file
+ required: false
+ default: None
+ aliases: []
+ force:
+ description:
+ - Whether or not to force the operation
+ required: false
+ default: None
+ aliases: []
+ separator:
+ description:
+ - The separator used when parsing the content keys.
+ required: false
+ default: '.'
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+oc_edit:
+ kind: rc
+ name: hawkular-cassandra-rc
+ namespace: openshift-infra
+ content:
+ spec.template.spec.containers[0].resources.limits.memory: 512
+ spec.template.spec.containers[0].resources.requests.memory: 256
+'''
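The dotted keys in the content example above are separator-delimited paths that get expanded into nested YAML before the edit is applied (list indexes such as containers[0] are handled by the Yedit class shown later in this patch). A minimal sketch of the idea for a simple key, not the module's actual code path:

def expand(key, value, sep='.'):
    '''toy expansion of a separator-delimited key into a nested dict'''
    result = current = {}
    parts = key.split(sep)
    for part in parts[:-1]:
        current[part] = {}
        current = current[part]
    current[parts[-1]] = value
    return result

# Illustrative key/value only.
print(expand('spec.strategy.type', 'Rolling'))
# {'spec': {'strategy': {'type': 'Rolling'}}}
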
diff --git a/roles/lib_openshift/src/doc/generated b/roles/lib_openshift/src/doc/generated
new file mode 100644
index 000000000..b55d18cff
--- /dev/null
+++ b/roles/lib_openshift/src/doc/generated
@@ -0,0 +1,10 @@
+#!/usr/bin/env python
+# pylint: disable=missing-docstring
+# flake8: noqa: T001
+# ___ ___ _ _ ___ ___ _ _____ ___ ___
+# / __| __| \| | __| _ \ /_\_ _| __| \
+# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
+# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
+# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
+# | |) | (_) | | .` | (_) || | | _|| |) | | | |
+# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
diff --git a/roles/lib_openshift/src/doc/license b/roles/lib_openshift/src/doc/license
new file mode 100644
index 000000000..717bb7f17
--- /dev/null
+++ b/roles/lib_openshift/src/doc/license
@@ -0,0 +1,16 @@
+#
+# Copyright 2016 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
diff --git a/roles/lib_openshift/src/doc/obj b/roles/lib_openshift/src/doc/obj
new file mode 100644
index 000000000..e44843eb3
--- /dev/null
+++ b/roles/lib_openshift/src/doc/obj
@@ -0,0 +1,95 @@
+# flake8: noqa
+# pylint: skip-file
+
+DOCUMENTATION = '''
+---
+module: oc_obj
+short_description: Generic interface to openshift objects
+description:
+ - Manage openshift objects programmatically.
+options:
+ state:
+ description:
+ - State represents whether to create, modify, delete, or list the object.
+ required: true
+ default: present
+ choices: ["present", "absent", "list"]
+ aliases: []
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+ name:
+ description:
+ - Name of the object that is being queried.
+ required: false
+ default: None
+ aliases: []
+ namespace:
+ description:
+ - The namespace where the object lives.
+ required: false
+ default: str
+ aliases: []
+ all_namespace:
+ description:
+ - Whether to query the object across all namespaces.
+ required: false
+ default: false
+ aliases: []
+ kind:
+ description:
+ - The kind attribute of the object. e.g. dc, bc, svc, route
+ required: True
+ default: None
+ aliases: []
+ files:
+ description:
+ - A list of files provided for the object.
+ required: false
+ default: None
+ aliases: []
+ delete_after:
+ description:
+ - Whether or not to delete the files after processing them.
+ required: false
+ default: false
+ aliases: []
+ content:
+ description:
+ - Content of the object being managed.
+ required: false
+ default: None
+ aliases: []
+ force:
+ description:
+ - Whether or not to force the operation
+ required: false
+ default: None
+ aliases: []
+ selector:
+ description:
+ - Selector that gets added to the query.
+ required: false
+ default: None
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+oc_obj:
+ kind: dc
+ name: router
+ namespace: default
+register: router_output
+'''
diff --git a/roles/lib_openshift/src/doc/route b/roles/lib_openshift/src/doc/route
new file mode 100644
index 000000000..1797d4d33
--- /dev/null
+++ b/roles/lib_openshift/src/doc/route
@@ -0,0 +1,120 @@
+# flake8: noqa
+# pylint: skip-file
+
+DOCUMENTATION = '''
+---
+module: oc_route
+short_description: Create, modify, and idempotently manage openshift routes.
+description:
+ - Manage openshift route objects programmatically.
+options:
+ state:
+ description:
+ - State represents whether to create, modify, delete, or list
+ required: true
+ default: present
+ choices: ["present", "absent", "list"]
+ aliases: []
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+ name:
+ description:
+ - Name of the object that is being queried.
+ required: false
+ default: None
+ aliases: []
+ namespace:
+ description:
+ - The namespace where the object lives.
+ required: false
+ default: str
+ aliases: []
+ tls_termination:
+ description:
+ - The options for termination. e.g. reencrypt
+ required: false
+ default: None
+ aliases: []
+ dest_cacert_path:
+ description:
+ - The path to the dest_cacert
+ required: false
+ default: None
+ aliases: []
+ cacert_path:
+ description:
+ - The path to the cacert
+ required: false
+ default: None
+ aliases: []
+ cert_path:
+ description:
+ - The path to the cert
+ required: false
+ default: None
+ aliases: []
+ key_path:
+ description:
+ - The path to the key
+ required: false
+ default: None
+ aliases: []
+ dest_cacert_content:
+ description:
+ - The dest_cacert content
+ required: false
+ default: None
+ aliases: []
+ cacert_content:
+ description:
+ - The cacert content
+ required: false
+ default: None
+ aliases: []
+ cert_content:
+ description:
+ - The cert content
+ required: false
+ default: None
+ aliases: []
+ service_name:
+ description:
+ - The name of the service that this route points to.
+ required: false
+ default: None
+ aliases: []
+ host:
+ description:
+ - The host that the route will use. e.g. myapp.x.y.z
+ required: false
+ default: None
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+- name: Configure certificates for reencrypt route
+ oc_route:
+ name: myapproute
+ namespace: awesomeapp
+ cert_path: "/etc/origin/master/named_certificates/myapp_cert"
+ key_path: "/etc/origin/master/named_certificates/myapp_key"
+ cacert_path: "/etc/origin/master/named_certificates/myapp_cacert"
+ dest_cacert_content: "{{ dest_cacert_content }}"
+ service_name: myapp_php
+ host: myapp.awesomeapp.openshift.com
+ tls_termination: reencrypt
+ run_once: true
+'''
diff --git a/roles/lib_openshift/src/doc/version b/roles/lib_openshift/src/doc/version
new file mode 100644
index 000000000..c0fdd53e7
--- /dev/null
+++ b/roles/lib_openshift/src/doc/version
@@ -0,0 +1,40 @@
+# flake8: noqa
+# pylint: skip-file
+
+DOCUMENTATION = '''
+---
+module: oc_version
+short_description: Return the current openshift version
+description:
+ - Return the installed openshift version, as reported by `oc version`.
+options:
+ state:
+ description:
+ - Currently, list is the only supported state.
+ required: true
+ default: list
+ choices: ["list"]
+ aliases: []
+ kubeconfig:
+ description:
+ - The path for the kubeconfig file to use for authentication
+ required: false
+ default: /etc/origin/master/admin.kubeconfig
+ aliases: []
+ debug:
+ description:
+ - Turn on debug output.
+ required: false
+ default: False
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+- name: get oc version
+ oc_version:
+ register: oc_version
+'''
diff --git a/roles/lib_openshift/src/generate.py b/roles/lib_openshift/src/generate.py
new file mode 100755
index 000000000..6daade108
--- /dev/null
+++ b/roles/lib_openshift/src/generate.py
@@ -0,0 +1,75 @@
+#!/usr/bin/env python
+'''
+ Generate the openshift-ansible/roles/lib_openshift/library/ modules.
+'''
+
+import argparse
+import os
+import yaml
+import six
+
+OPENSHIFT_ANSIBLE_PATH = os.path.dirname(os.path.realpath(__file__))
+OPENSHIFT_ANSIBLE_SOURCES_PATH = os.path.join(OPENSHIFT_ANSIBLE_PATH, 'sources.yml') # noqa: E501
+LIBRARY = os.path.join(OPENSHIFT_ANSIBLE_PATH, '..', 'library/')
+
+
+class GenerateAnsibleException(Exception):
+ '''General Exception for generate function'''
+ pass
+
+
+def parse_args():
+ '''parse arguments to generate'''
+ parser = argparse.ArgumentParser(description="Generate ansible modules.")
+ parser.add_argument('--verify', action='store_true', default=False,
+ help='Verify library code matches the generated code.')
+
+ return parser.parse_args()
+
+
+def generate(parts):
+ '''generate the source code for the ansible modules'''
+
+ data = six.StringIO()
+ for fpart in parts:
+ # skip the leading 'flake8: noqa' / 'pylint: skip-file' pragma lines
+ with open(os.path.join(OPENSHIFT_ANSIBLE_PATH, fpart)) as pfd:
+ for idx, line in enumerate(pfd):
+ if idx in [0, 1] and ('flake8: noqa' in line or 'pylint: skip-file' in line): # noqa: E501
+ continue
+
+ data.write(line)
+
+ return data
+
+
+def get_sources():
+ '''return the parsed sources.yml mapping'''
+ return yaml.load(open(OPENSHIFT_ANSIBLE_SOURCES_PATH).read())
+
+
+def verify():
+ '''verify if the generated code matches the library code'''
+ for fname, parts in get_sources().items():
+ data = generate(parts)
+ fname = os.path.join(LIBRARY, fname)
+ if not open(fname).read() == data.getvalue():
+ raise GenerateAnsibleException('Generated content does not match for %s' % fname)
+
+
+def main():
+ ''' combine the necessary files to create the ansible module '''
+ args = parse_args()
+ if args.verify:
+ verify()
+
+ for fname, parts in get_sources().items():
+ data = generate(parts)
+ fname = os.path.join(LIBRARY, fname)
+ with open(fname, 'w') as afd:
+ afd.seek(0)
+ afd.write(data.getvalue())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/lib_openshift/src/lib/base.py b/roles/lib_openshift/src/lib/base.py
new file mode 100644
index 000000000..db5f4e890
--- /dev/null
+++ b/roles/lib_openshift/src/lib/base.py
@@ -0,0 +1,522 @@
+# pylint: skip-file
+# flake8: noqa
+# pylint: disable=too-many-lines
+# noqa: E301,E302,E303,T001
+
+
+class OpenShiftCLIError(Exception):
+ '''Exception class for openshiftcli'''
+ pass
+
+
+# pylint: disable=too-few-public-methods
+class OpenShiftCLI(object):
+ ''' Class to wrap the command line tools '''
+ def __init__(self,
+ namespace,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False,
+ all_namespaces=False):
+ ''' Constructor for OpenshiftCLI '''
+ self.namespace = namespace
+ self.verbose = verbose
+ self.kubeconfig = kubeconfig
+ self.all_namespaces = all_namespaces
+
+ # Pylint allows only 5 arguments to be passed.
+ # pylint: disable=too-many-arguments
+ def _replace_content(self, resource, rname, content, force=False, sep='.'):
+ ''' replace the current object with the content '''
+ res = self._get(resource, rname)
+ if not res['results']:
+ return res
+
+ fname = '/tmp/%s' % rname
+ yed = Yedit(fname, res['results'][0], separator=sep)
+ changes = []
+ for key, value in content.items():
+ changes.append(yed.put(key, value))
+
+ if any([change[0] for change in changes]):
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._replace(fname, force)
+
+ return {'returncode': 0, 'updated': False}
+
+ def _replace(self, fname, force=False):
+ '''replace the current object with oc replace'''
+ cmd = ['replace', '-f', fname]
+ if force:
+ cmd.append('--force')
+ return self.openshift_cmd(cmd)
+
+ def _create_from_content(self, rname, content):
+ '''create a temporary file and then call oc create on it'''
+ fname = '/tmp/%s' % rname
+ yed = Yedit(fname, content=content)
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._create(fname)
+
+ def _create(self, fname):
+ '''call oc create on a filename'''
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _delete(self, resource, rname, selector=None):
+ '''call oc delete on a resource'''
+ cmd = ['delete', resource, rname]
+ if selector:
+ cmd.append('--selector=%s' % selector)
+
+ return self.openshift_cmd(cmd)
+
+ def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
+ '''process a template
+
+ template_name: the name of the template to process
+ create: whether to send to oc create after processing
+ params: the parameters for the template
+ template_data: the incoming template's data; instead of a file
+ '''
+ cmd = ['process']
+ if template_data:
+ cmd.extend(['-f', '-'])
+ else:
+ cmd.append(template_name)
+ if params:
+ param_str = ["%s=%s" % (key, value) for key, value in params.items()]
+ cmd.append('-v')
+ cmd.extend(param_str)
+
+ results = self.openshift_cmd(cmd, output=True, input_data=template_data)
+
+ if results['returncode'] != 0 or not create:
+ return results
+
+ fname = '/tmp/%s' % template_name
+ yed = Yedit(fname, results['results'])
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self.openshift_cmd(['create', '-f', fname])
+
+ def _get(self, resource, rname=None, selector=None):
+ '''return a resource by name '''
+ cmd = ['get', resource]
+ if selector:
+ cmd.append('--selector=%s' % selector)
+
+ cmd.extend(['-o', 'json'])
+
+ if rname:
+ cmd.append(rname)
+
+ rval = self.openshift_cmd(cmd, output=True)
+
+ # Ensure results are returned in an array
+ if 'items' in rval:
+ rval['results'] = rval['items']
+ elif not isinstance(rval['results'], list):
+ rval['results'] = [rval['results']]
+
+ return rval
+
+ def _schedulable(self, node=None, selector=None, schedulable=True):
+ ''' perform oadm manage-node schedulable '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ cmd.append('--schedulable=%s' % schedulable)
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
+
+ def _list_pods(self, node=None, selector=None, pod_selector=None):
+ ''' perform oadm list pods
+
+ node: the node in which to list pods
+ selector: the label selector filter if provided
+ pod_selector: the pod selector filter if provided
+ '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ if pod_selector:
+ cmd.append('--pod-selector=%s' % pod_selector)
+
+ cmd.extend(['--list-pods', '-o', 'json'])
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ # pylint: disable=too-many-arguments
+ def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
+ ''' perform oadm manage-node evacuate '''
+ cmd = ['manage-node']
+ if node:
+ cmd.extend(node)
+ else:
+ cmd.append('--selector=%s' % selector)
+
+ if dry_run:
+ cmd.append('--dry-run')
+
+ if pod_selector:
+ cmd.append('--pod-selector=%s' % pod_selector)
+
+ if grace_period:
+ cmd.append('--grace-period=%s' % int(grace_period))
+
+ if force:
+ cmd.append('--force')
+
+ cmd.append('--evacuate')
+
+ return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
+
+ def _version(self):
+ ''' return the openshift version'''
+ return self.openshift_cmd(['version'], output=True, output_type='raw')
+
+ def _import_image(self, url=None, name=None, tag=None):
+ ''' perform image import '''
+ cmd = ['import-image']
+
+ image = '{0}'.format(name)
+ if tag:
+ image += ':{0}'.format(tag)
+
+ cmd.append(image)
+
+ if url:
+ cmd.append('--from={0}/{1}'.format(url, image))
+
+ cmd.append('-n{0}'.format(self.namespace))
+
+ cmd.append('--confirm')
+ return self.openshift_cmd(cmd)
+
+ # pylint: disable=too-many-arguments,too-many-branches
+ def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
+ '''Base command for oc '''
+ cmds = []
+ if oadm:
+ cmds = ['/usr/bin/oadm']
+ else:
+ cmds = ['/usr/bin/oc']
+
+ if self.all_namespaces:
+ cmds.extend(['--all-namespaces'])
+ elif self.namespace:
+ cmds.extend(['-n', self.namespace])
+
+ cmds.extend(cmd)
+
+ rval = {}
+ results = ''
+ err = None
+
+ if self.verbose:
+ print(' '.join(cmds))
+
+ proc = subprocess.Popen(cmds,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env={'KUBECONFIG': self.kubeconfig})
+
+ stdout, stderr = proc.communicate(input_data)
+ rval = {"returncode": proc.returncode,
+ "results": results,
+ "cmd": ' '.join(cmds)}
+
+ if proc.returncode == 0:
+ if output:
+ if output_type == 'json':
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as err:
+ if "No JSON object could be decoded" in err.args:
+ err = err.args
+ elif output_type == 'raw':
+ rval['results'] = stdout
+
+ if self.verbose:
+ print("STDOUT: {0}".format(stdout))
+ print("STDERR: {0}".format(stderr))
+
+ if err:
+ rval.update({"err": err,
+ "stderr": stderr,
+ "stdout": stdout,
+ "cmd": cmds})
+
+ else:
+ rval.update({"stderr": stderr,
+ "stdout": stdout,
+ "results": {}})
+
+ return rval
+
+
+class Utils(object):
+ ''' utilities for openshiftcli modules '''
+ @staticmethod
+ def create_file(rname, data, ftype='yaml'):
+ ''' create a file in tmp with name and contents'''
+ path = os.path.join('/tmp', rname)
+ with open(path, 'w') as fds:
+ if ftype == 'yaml':
+ fds.write(yaml.dump(data, Dumper=yaml.RoundTripDumper))
+
+ elif ftype == 'json':
+ fds.write(json.dumps(data))
+ else:
+ fds.write(data)
+
+ # Register cleanup when module is done
+ atexit.register(Utils.cleanup, [path])
+ return path
+
+ @staticmethod
+ def create_files_from_contents(content, content_type=None):
+ '''Turn an array of dict: filename, content into a files array'''
+ if not isinstance(content, list):
+ content = [content]
+ files = []
+ for item in content:
+ path = Utils.create_file(item['path'], item['data'], ftype=content_type)
+ files.append({'name': os.path.basename(path), 'path': path})
+ return files
+
+ @staticmethod
+ def cleanup(files):
+ '''Clean up on exit '''
+ for sfile in files:
+ if os.path.exists(sfile):
+ if os.path.isdir(sfile):
+ shutil.rmtree(sfile)
+ elif os.path.isfile(sfile):
+ os.remove(sfile)
+
+ @staticmethod
+ def exists(results, _name):
+ ''' Check to see if the results include the name '''
+ if not results:
+ return False
+
+ if Utils.find_result(results, _name):
+ return True
+
+ return False
+
+ @staticmethod
+ def find_result(results, _name):
+ ''' Find the specified result by name'''
+ rval = None
+ for result in results:
+ if 'metadata' in result and result['metadata']['name'] == _name:
+ rval = result
+ break
+
+ return rval
+
+ @staticmethod
+ def get_resource_file(sfile, sfile_type='yaml'):
+ ''' return the service file '''
+ contents = None
+ with open(sfile) as sfd:
+ contents = sfd.read()
+
+ if sfile_type == 'yaml':
+ contents = yaml.load(contents, yaml.RoundTripLoader)
+ elif sfile_type == 'json':
+ contents = json.loads(contents)
+
+ return contents
+
+ @staticmethod
+ def filter_versions(stdout):
+ ''' filter the oc version output '''
+
+ version_dict = {}
+ version_search = ['oc', 'openshift', 'kubernetes']
+
+ for line in stdout.strip().split('\n'):
+ for term in version_search:
+ if not line:
+ continue
+ if line.startswith(term):
+ version_dict[term] = line.split()[-1]
+
+ # horrible hack to get the openshift version in OpenShift 3.2:
+ # by default, "oc version" in 3.2 does not return an "openshift" version
+ if "openshift" not in version_dict:
+ version_dict["openshift"] = version_dict["oc"]
+
+ return version_dict
+
+ @staticmethod
+ def add_custom_versions(versions):
+ ''' create custom versions strings '''
+
+ versions_dict = {}
+
+ for tech, version in versions.items():
+ # clean up "-" from version
+ if "-" in version:
+ version = version.split("-")[0]
+
+ if version.startswith('v'):
+ versions_dict[tech + '_numeric'] = version[1:].split('+')[0]
+ # "v3.3.0.33" is what we have, we want "3.3"
+ versions_dict[tech + '_short'] = version[1:4]
+
+ return versions_dict
+
+ @staticmethod
+ def openshift_installed():
+ ''' check if openshift is installed '''
+ import yum
+
+ yum_base = yum.YumBase()
+ if yum_base.rpmdb.searchNevra(name='atomic-openshift'):
+ return True
+
+ return False
+
+ # Disabling too-many-branches. This is a yaml dictionary comparison function
+ # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
+ @staticmethod
+ def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
+ ''' Given a user defined definition, compare it with the results given back by our query. '''
+
+ # Currently these values are autogenerated and we do not need to check them
+ skip = ['metadata', 'status']
+ if skip_keys:
+ skip.extend(skip_keys)
+
+ for key, value in result_def.items():
+ if key in skip:
+ continue
+
+ # Both are lists
+ if isinstance(value, list):
+ if key not in user_def:
+ if debug:
+ print('User data does not have key [%s]' % key)
+ print('User data: %s' % user_def)
+ return False
+
+ if not isinstance(user_def[key], list):
+ if debug:
+ print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
+ return False
+
+ if len(user_def[key]) != len(value):
+ if debug:
+ print("List lengths are not equal.")
+ print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
+ print("user_def: %s" % user_def[key])
+ print("value: %s" % value)
+ return False
+
+ for values in zip(user_def[key], value):
+ if isinstance(values[0], dict) and isinstance(values[1], dict):
+ if debug:
+ print('sending list - list')
+ print(type(values[0]))
+ print(type(values[1]))
+ result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
+ if not result:
+ print('list compare returned false')
+ return False
+
+ elif value != user_def[key]:
+ if debug:
+ print('value should be identical')
+ print(value)
+ print(user_def[key])
+ return False
+
+ # recurse on a dictionary
+ elif isinstance(value, dict):
+ if key not in user_def:
+ if debug:
+ print("user_def does not have key [%s]" % key)
+ return False
+ if not isinstance(user_def[key], dict):
+ if debug:
+ print("dict returned false: not instance of dict")
+ return False
+
+ # before passing ensure keys match
+ api_values = set(value.keys()) - set(skip)
+ user_values = set(user_def[key].keys()) - set(skip)
+ if api_values != user_values:
+ if debug:
+ print("keys are not equal in dict")
+ print(api_values)
+ print(user_values)
+ return False
+
+ result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
+ if not result:
+ if debug:
+ print("dict returned false")
+ print(result)
+ return False
+
+ # Verify each key, value pair is the same
+ else:
+ if key not in user_def or value != user_def[key]:
+ if debug:
+ print("value not equal; user_def does not have key")
+ print(key)
+ print(value)
+ if key in user_def:
+ print(user_def[key])
+ return False
+
+ if debug:
+ print('returning true')
+ return True
+
+
+class OpenShiftCLIConfig(object):
+ '''Generic Config'''
+ def __init__(self, rname, namespace, kubeconfig, options):
+ self.kubeconfig = kubeconfig
+ self.name = rname
+ self.namespace = namespace
+ self._options = options
+
+ @property
+ def config_options(self):
+ ''' return config options '''
+ return self._options
+
+ def to_option_list(self):
+ '''return all options as a string'''
+ '''return all options as a list of cli parameters'''
+
+ def stringify(self):
+ ''' return the options hash as a list of cli params '''
+ rval = []
+ for key, data in self.config_options.items():
+ if data['include'] \
+ and (data['value'] or isinstance(data['value'], int)):
+ rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
+
+ return rval
+
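Utils.check_def_equal() is what OCRoute.needs_update() above leans on for idempotency: the user-supplied definition is compared key by key against what `oc get` returned, with the autogenerated sections skipped. A small made-up illustration:

# Hypothetical definitions; 'metadata' and 'status' are in the default skip list.
user_def = {'spec': {'host': 'myapp.example.com',
                     'to': {'kind': 'Service', 'name': 'myapp', 'weight': 100}}}

result_def = {'metadata': {'name': 'myapp', 'resourceVersion': '12345'},
              'status': {'ingress': []},
              'spec': {'host': 'myapp.example.com',
                       'to': {'kind': 'Service', 'name': 'myapp', 'weight': 100}}}

# Utils.check_def_equal(user_def, result_def)  -> True, so no update is needed.
# Changing result_def['spec']['host'] makes it return False, which is exactly
# the condition OCRoute.needs_update() reports back to run_ansible().
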
diff --git a/roles/lib_openshift/src/lib/import.py b/roles/lib_openshift/src/lib/import.py
new file mode 100644
index 000000000..c2b30e019
--- /dev/null
+++ b/roles/lib_openshift/src/lib/import.py
@@ -0,0 +1,17 @@
+# pylint: skip-file
+# flake8: noqa
+'''
+ OpenShiftCLI class that wraps the oc commands in a subprocess
+'''
+# pylint: disable=too-many-lines
+
+from __future__ import print_function
+import atexit
+import json
+import os
+import re
+import shutil
+import subprocess
+# pylint: disable=import-error
+import ruamel.yaml as yaml
+from ansible.module_utils.basic import AnsibleModule
diff --git a/roles/lib_openshift/src/lib/route.py b/roles/lib_openshift/src/lib/route.py
new file mode 100644
index 000000000..3130e7358
--- /dev/null
+++ b/roles/lib_openshift/src/lib/route.py
@@ -0,0 +1,123 @@
+# pylint: skip-file
+# flake8: noqa
+# noqa: E302,E301
+
+
+# pylint: disable=too-many-instance-attributes
+class RouteConfig(object):
+ ''' Handle route options '''
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ sname,
+ namespace,
+ kubeconfig,
+ destcacert=None,
+ cacert=None,
+ cert=None,
+ key=None,
+ host=None,
+ tls_termination=None,
+ service_name=None,
+ wildcard_policy=None,
+ weight=None):
+ ''' constructor for handling route options '''
+ self.kubeconfig = kubeconfig
+ self.name = sname
+ self.namespace = namespace
+ self.host = host
+ self.tls_termination = tls_termination
+ self.destcacert = destcacert
+ self.cacert = cacert
+ self.cert = cert
+ self.key = key
+ self.service_name = service_name
+ self.data = {}
+ self.wildcard_policy = wildcard_policy
+ if wildcard_policy is None:
+ self.wildcard_policy = 'None'
+ self.weight = weight
+ if weight is None:
+ self.weight = 100
+
+ self.create_dict()
+
+ def create_dict(self):
+ ''' populate the route data as a dict '''
+ self.data['apiVersion'] = 'v1'
+ self.data['kind'] = 'Route'
+ self.data['metadata'] = {}
+ self.data['metadata']['name'] = self.name
+ self.data['metadata']['namespace'] = self.namespace
+ self.data['spec'] = {}
+
+ self.data['spec']['host'] = self.host
+
+ if self.tls_termination:
+ self.data['spec']['tls'] = {}
+
+ if self.tls_termination == 'reencrypt':
+ self.data['spec']['tls']['destinationCACertificate'] = self.destcacert
+ self.data['spec']['tls']['key'] = self.key
+ self.data['spec']['tls']['caCertificate'] = self.cacert
+ self.data['spec']['tls']['certificate'] = self.cert
+ self.data['spec']['tls']['termination'] = self.tls_termination
+
+ self.data['spec']['to'] = {'kind': 'Service',
+ 'name': self.service_name,
+ 'weight': self.weight}
+
+ self.data['spec']['wildcardPolicy'] = self.wildcard_policy
+
+# pylint: disable=too-many-instance-attributes,too-many-public-methods
+class Route(Yedit):
+ ''' Class to model an openshift route object '''
+ wildcard_policy = "spec.wildcardPolicy"
+ host_path = "spec.host"
+ service_path = "spec.to.name"
+ weight_path = "spec.to.weight"
+ cert_path = "spec.tls.certificate"
+ cacert_path = "spec.tls.caCertificate"
+ destcacert_path = "spec.tls.destinationCACertificate"
+ termination_path = "spec.tls.termination"
+ key_path = "spec.tls.key"
+ kind = 'route'
+
+ def __init__(self, content):
+ '''Route constructor'''
+ super(Route, self).__init__(content=content)
+
+ def get_destcacert(self):
+ ''' return destination cacert '''
+ return self.get(Route.destcacert_path)
+
+ def get_cert(self):
+ ''' return cert '''
+ return self.get(Route.cert_path)
+
+ def get_key(self):
+ ''' return key '''
+ return self.get(Route.key_path)
+
+ def get_cacert(self):
+ ''' return cacert '''
+ return self.get(Route.cacert_path)
+
+ def get_service(self):
+ ''' return service name '''
+ return self.get(Route.service_path)
+
+ def get_weight(self):
+ ''' return service weight '''
+ return self.get(Route.weight_path)
+
+ def get_termination(self):
+ ''' return tls termination'''
+ return self.get(Route.termination_path)
+
+ def get_host(self):
+ ''' return host '''
+ return self.get(Route.host_path)
+
+ def get_wildcard_policy(self):
+ ''' return wildcardPolicy '''
+ return self.get(Route.wildcard_policy)
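For reference, roughly what RouteConfig.create_dict() builds for an edge-terminated route with the defaults applied (certificate values are placeholders); a reencrypt route would additionally carry spec.tls.destinationCACertificate:

# Approximate contents of RouteConfig('test', 'default', kubeconfig,
#                                     cacert='CA...', cert='CERT...', key='KEY...',
#                                     host='test.example', tls_termination='edge',
#                                     service_name='test').data
route_data = {
    'apiVersion': 'v1',
    'kind': 'Route',
    'metadata': {'name': 'test', 'namespace': 'default'},
    'spec': {
        'host': 'test.example',
        'tls': {'key': 'KEY...',
                'caCertificate': 'CA...',
                'certificate': 'CERT...',
                'termination': 'edge'},
        'to': {'kind': 'Service', 'name': 'test', 'weight': 100},
        'wildcardPolicy': 'None',
    },
}
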
diff --git a/roles/lib_openshift/src/sources.yml b/roles/lib_openshift/src/sources.yml
new file mode 100644
index 000000000..f1fd558d3
--- /dev/null
+++ b/roles/lib_openshift/src/sources.yml
@@ -0,0 +1,38 @@
+---
+oc_edit.py:
+- doc/generated
+- doc/license
+- lib/import.py
+- doc/edit
+- ../../lib_utils/src/class/yedit.py
+- lib/base.py
+- class/oc_edit.py
+- ansible/oc_edit.py
+oc_obj.py:
+- doc/generated
+- doc/license
+- lib/import.py
+- doc/obj
+- ../../lib_utils/src/class/yedit.py
+- lib/base.py
+- class/oc_obj.py
+- ansible/oc_obj.py
+oc_route.py:
+- doc/generated
+- doc/license
+- lib/import.py
+- doc/route
+- ../../lib_utils/src/class/yedit.py
+- lib/base.py
+- lib/route.py
+- class/oc_route.py
+- ansible/oc_route.py
+oc_version.py:
+- doc/generated
+- doc/license
+- lib/import.py
+- doc/version
+- ../../lib_utils/src/class/yedit.py
+- lib/base.py
+- class/oc_version.py
+- ansible/oc_version.py
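generate.py consumes this mapping directly: each top-level key is a module to emit into roles/lib_openshift/library/, and the list underneath is the ordered set of fragments to concatenate. A sketch of driving it by hand, assuming it is run from roles/lib_openshift/src/:

# Command line:
#   python generate.py            # regenerate the library/ modules
#   python generate.py --verify   # fail if library/ is out of date
#
# Programmatic sketch for a single module (hypothetical; assumes generate.py
# is importable from the current directory):
import generate

sources = generate.get_sources()            # the parsed sources.yml mapping
fragments = sources['oc_version.py']        # ordered list of source fragments
module_text = generate.generate(fragments)  # six.StringIO with the combined code
print(module_text.getvalue()[:60])
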
diff --git a/roles/lib_openshift/src/test/integration/oc_route.yml b/roles/lib_openshift/src/test/integration/oc_route.yml
new file mode 100755
index 000000000..620d5d5e7
--- /dev/null
+++ b/roles/lib_openshift/src/test/integration/oc_route.yml
@@ -0,0 +1,77 @@
+#!/usr/bin/ansible-playbook --module-path=../../../library/
+# ./oc_route.yml -M ../../../library -e "cli_master_test=$OPENSHIFT_MASTER"
+---
+- hosts: "{{ cli_master_test }}"
+ gather_facts: no
+ user: root
+ tasks:
+ - name: create route
+ oc_route:
+ name: test
+ namespace: default
+ tls_termination: edge
+ cert_content: testing cert
+ cacert_content: testing cacert
+ key_content: key content
+ service_name: test
+ host: test.example
+ register: routeout
+ - debug: var=routeout
+
+ - assert:
+ that: "routeout.results.results[0]['metadata']['name'] == 'test'"
+ msg: route create failed
+
+ - name: get route
+ oc_route:
+ state: list
+ name: test
+ namespace: default
+ register: routeout
+ - debug: var=routeout
+
+ - assert:
+ that: "routeout.results[0]['metadata']['name'] == 'test'"
+ msg: get route failed
+
+ - name: delete route
+ oc_route:
+ state: absent
+ name: test
+ namespace: default
+ register: routeout
+ - debug: var=routeout
+
+ - assert:
+ that: "routeout.results.returncode == 0"
+ msg: delete route failed
+
+ - name: create route
+ oc_route:
+ name: test
+ namespace: default
+ tls_termination: edge
+ cert_content: testing cert
+ cacert_content: testing cacert
+ key_content: testing key
+ service_name: test
+ host: test.example
+ register: routeout
+ - debug: var=routeout
+
+ - name: create route noop
+ oc_route:
+ name: test
+ namespace: default
+ tls_termination: edge
+ cert_content: testing cert
+ cacert_content: testing cacert
+ key_content: testing key
+ service_name: test
+ host: test.example
+ register: routeout
+ - debug: var=routeout
+
+ - assert:
+ that: "routeout.changed == False"
+ msg: Route create not idempotent
diff --git a/roles/lib_openshift/src/test/integration/oc_version.yml b/roles/lib_openshift/src/test/integration/oc_version.yml
new file mode 100755
index 000000000..52336d8da
--- /dev/null
+++ b/roles/lib_openshift/src/test/integration/oc_version.yml
@@ -0,0 +1,17 @@
+#!/usr/bin/ansible-playbook --module-path=../../../library/
+# ./oc_version.yml -e "cli_master_test=$OPENSHIFT_MASTER"
+---
+- hosts: "{{ cli_master_test }}"
+ gather_facts: no
+ user: root
+ tasks:
+ - name: Get openshift version
+ oc_version:
+ register: versionout
+
+ - debug: var=versionout
+
+ - assert:
+ that:
+ - "'oc_numeric' in versionout.results.keys()"
+ msg: "Did not find 'oc_numeric' in version results."
diff --git a/roles/lib_openshift/src/test/unit/oc_version.py b/roles/lib_openshift/src/test/unit/oc_version.py
new file mode 100755
index 000000000..8d9128187
--- /dev/null
+++ b/roles/lib_openshift/src/test/unit/oc_version.py
@@ -0,0 +1,70 @@
+#!/usr/bin/env python2
+'''
+ Unit tests for oc version
+'''
+# To run
+# python -m unittest oc_version
+#
+# .
+# Ran 1 test in 0.597s
+#
+# OK
+
+import os
+import sys
+import unittest
+
+# Removing invalid variable names for tests so that I can
+# keep them brief
+# pylint: disable=invalid-name,no-name-in-module
+# Disable import-error b/c our libraries aren't loaded in jenkins
+# pylint: disable=import-error,wrong-import-position
+# place class in our python path
+module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
+sys.path.insert(0, module_path)
+from oc_version import OCVersion # noqa: E402
+
+
+# pylint: disable=unused-argument
+def oc_cmd_mock(cmd, oadm=False, output=False, output_type='json', input_data=None):
+ '''mock command for openshift_cmd'''
+ version = '''oc v3.4.0.39
+kubernetes v1.4.0+776c994
+features: Basic-Auth GSSAPI Kerberos SPNEGO
+
+Server https://internal.api.opstest.openshift.com
+openshift v3.4.0.39
+kubernetes v1.4.0+776c994
+'''
+ if 'version' in cmd:
+ return {'stderr': None,
+ 'stdout': version,
+ 'returncode': 0,
+ 'results': version,
+ 'cmd': cmd}
+
+
+class OCVersionTest(unittest.TestCase):
+ '''
+ Test class for OCVersion
+ '''
+
+ def setUp(self):
+ ''' setup method will create a file and set to known configuration '''
+ self.oc_ver = OCVersion(None, False)
+ self.oc_ver.openshift_cmd = oc_cmd_mock
+
+ def test_get(self):
+ ''' Testing a get '''
+ results = self.oc_ver.get()
+ self.assertEqual(results['oc_short'], '3.4')
+ self.assertEqual(results['oc_numeric'], '3.4.0.39')
+ self.assertEqual(results['kubernetes_numeric'], '1.4.0')
+
+ def tearDown(self):
+ '''TearDown method'''
+ pass
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/roles/lib_utils/library/yedit.py b/roles/lib_utils/library/yedit.py
new file mode 100644
index 000000000..6a5b40dcc
--- /dev/null
+++ b/roles/lib_utils/library/yedit.py
@@ -0,0 +1,774 @@
+#!/usr/bin/env python
+# pylint: disable=missing-docstring
+# ___ ___ _ _ ___ ___ _ _____ ___ ___
+# / __| __| \| | __| _ \ /_\_ _| __| \
+# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
+# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
+# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
+# | |) | (_) | | .` | (_) || | | _|| |) | | | |
+# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
+#
+# Copyright 2016 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# pylint: disable=wrong-import-order
+import json
+import os
+import re
+# pylint: disable=import-error
+import ruamel.yaml as yaml
+import shutil
+from ansible.module_utils.basic import AnsibleModule
+
+DOCUMENTATION = '''
+---
+module: yedit
+short_description: Create, modify, and idempotently manage yaml files.
+description:
+ - Modify yaml files programmatically.
+options:
+ state:
+ description:
+ - State represents whether to create, modify, delete, or list yaml
+ required: true
+ default: present
+ choices: ["present", "absent", "list"]
+ aliases: []
+ debug:
+ description:
+ - Turn on debug information.
+ required: false
+ default: false
+ aliases: []
+ src:
+ description:
+ - The file that is the target of the modifications.
+ required: false
+ default: None
+ aliases: []
+ content:
+ description:
+ - Content represents the yaml content you desire to work with. This
+ - could be the file contents to write or the in-memory data to modify.
+ required: false
+ default: None
+ aliases: []
+ content_type:
+ description:
+ - The python type of the content parameter.
+ required: false
+ default: 'dict'
+ aliases: []
+ key:
+ description:
+ - The path to the value you wish to modify. Empty string means the top of
+ - the document.
+ required: false
+ default: ''
+ aliases: []
+ value:
+ description:
+ - The incoming value of parameter 'key'.
+ required: false
+ default:
+ aliases: []
+ value_type:
+ description:
+ - The python type of the incoming value.
+ required: false
+ default: ''
+ aliases: []
+ update:
+ description:
+ - Whether the update should be performed on a dict/hash or list/array
+ - object.
+ required: false
+ default: false
+ aliases: []
+ append:
+ description:
+ - Whether to append to an array/list. When the key does not exist or is
+ - null, a new array is created. When the key is of a non-list type,
+ - nothing is done.
+ required: false
+ default: false
+ aliases: []
+ index:
+ description:
+ - Used in conjunction with the update parameter. This will update a
+ - specific index in an array/list.
+ required: false
+ default: None
+ aliases: []
+ curr_value:
+ description:
+ - Used in conjunction with the update parameter. This is the current
+ - value of 'key' in the yaml file.
+ required: false
+ default: None
+ aliases: []
+ curr_value_format:
+ description:
+ - Format of the incoming current value.
+ choices: ["yaml", "json", "str"]
+ required: false
+ default: yaml
+ aliases: []
+ backup:
+ description:
+ - Whether to make a backup copy of the current file when performing an
+ - edit.
+ required: false
+ default: true
+ aliases: []
+ separator:
+ description:
+ - The separator being used when parsing strings.
+ required: false
+ default: '.'
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+# Simple insert of key, value
+- name: insert simple key, value
+ yedit:
+ src: somefile.yml
+ key: test
+ value: somevalue
+ state: present
+# Results:
+# test: somevalue
+
+# Multilevel insert of key, value
+- name: insert simple key, value
+ yedit:
+ src: somefile.yml
+ key: a#b#c
+ value: d
+ state: present
+# Results:
+# a:
+# b:
+# c: d
+'''
+# noqa: E301,E302
+
+
+class YeditException(Exception):
+ ''' Exception class for Yedit '''
+ pass
+
+
+# pylint: disable=too-many-public-methods
+class Yedit(object):
+ ''' Class to modify yaml files '''
+ re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ com_sep = set(['.', '#', '|', ':'])
+
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ filename=None,
+ content=None,
+ content_type='yaml',
+ separator='.',
+ backup=False):
+ self.content = content
+ self._separator = separator
+ self.filename = filename
+ self.__yaml_dict = content
+ self.content_type = content_type
+ self.backup = backup
+ self.load(content_type=self.content_type)
+ if self.__yaml_dict is None:
+ self.__yaml_dict = {}
+
+ @property
+ def separator(self):
+ ''' getter method for separator '''
+ return self._separator
+
+ @separator.setter
+ def separator(self, value):
+ ''' setter method for separator '''
+ self._separator = value
+
+ @property
+ def yaml_dict(self):
+ ''' getter method for yaml_dict '''
+ return self.__yaml_dict
+
+ @yaml_dict.setter
+ def yaml_dict(self, value):
+ ''' setter method for yaml_dict '''
+ self.__yaml_dict = value
+
+ @staticmethod
+ def parse_key(key, sep='.'):
+ '''parse the key allowing the appropriate separator'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ return re.findall(Yedit.re_key % ''.join(common_separators), key)
+
+ @staticmethod
+ def valid_key(key, sep='.'):
+ '''validate the incoming key'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
+ return False
+
+ return True
+
+ @staticmethod
+ def remove_entry(data, key, sep='.'):
+ ''' remove data at location key '''
+ if key == '' and isinstance(data, dict):
+ data.clear()
+ return True
+ elif key == '' and isinstance(data, list):
+ del data[:]
+ return True
+
+ if not (key and Yedit.valid_key(key, sep)) and \
+ isinstance(data, (list, dict)):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ # process last index for remove
+ # expected list entry
+ if key_indexes[-1][0]:
+ if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ del data[int(key_indexes[-1][0])]
+ return True
+
+ # expected dict entry
+ elif key_indexes[-1][1]:
+ if isinstance(data, dict):
+ del data[key_indexes[-1][1]]
+ return True
+
+ @staticmethod
+ def add_entry(data, key, item=None, sep='.'):
+ ''' Add an item to a dictionary with key notation a.b.c
+ d = {'a': {'b': 'c'}}
+ key = a#b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key:
+ if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
+ data = data[dict_key]
+ continue
+
+ elif data and not isinstance(data, dict):
+ return None
+
+ data[dict_key] = {}
+ data = data[dict_key]
+
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ if key == '':
+ data = item
+
+ # process last index for add
+ # expected list entry
+ elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ data[int(key_indexes[-1][0])] = item
+
+ # expected dict entry
+ elif key_indexes[-1][1] and isinstance(data, dict):
+ data[key_indexes[-1][1]] = item
+
+ return data
+
+ @staticmethod
+ def get_entry(data, key, sep='.'):
+ ''' Get an item from a dictionary with key notation a.b.c
+ d = {'a': {'b': 'c'}}
+ key = a.b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ return data
+
+ def write(self):
+ ''' write to file '''
+ if not self.filename:
+ raise YeditException('Please specify a filename.')
+
+ if self.backup and self.file_exists():
+ shutil.copy(self.filename, self.filename + '.orig')
+
+ tmp_filename = self.filename + '.yedit'
+ with open(tmp_filename, 'w') as yfd:
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ self.yaml_dict.fa.set_block_style()
+
+ yfd.write(yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+
+ os.rename(tmp_filename, self.filename)
+
+ return (True, self.yaml_dict)
+
+ def read(self):
+ ''' read from file '''
+ # check if it exists
+ if self.filename is None or not self.file_exists():
+ return None
+
+ contents = None
+ with open(self.filename) as yfd:
+ contents = yfd.read()
+
+ return contents
+
+ def file_exists(self):
+ ''' return whether file exists '''
+ if os.path.exists(self.filename):
+ return True
+
+ return False
+
+ def load(self, content_type='yaml'):
+ ''' return yaml file '''
+ contents = self.read()
+
+ if not contents and not self.content:
+ return None
+
+ if self.content:
+ if isinstance(self.content, dict):
+ self.yaml_dict = self.content
+ return self.yaml_dict
+ elif isinstance(self.content, str):
+ contents = self.content
+
+ # check if it is yaml
+ try:
+ if content_type == 'yaml' and contents:
+ self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ self.yaml_dict.fa.set_block_style()
+ elif content_type == 'json' and contents:
+ self.yaml_dict = json.loads(contents)
+ except yaml.YAMLError as err:
+ # Error loading yaml or json
+ raise YeditException('Problem with loading yaml file. %s' % err)
+
+ return self.yaml_dict
+
+ def get(self, key):
+ ''' get a specified key'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
+ except KeyError:
+ entry = None
+
+ return entry
+
+ def pop(self, path, key_or_item):
+ ''' remove a key, value pair from a dict or an item for a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ if isinstance(entry, dict):
+ # pylint: disable=no-member,maybe-no-member
+ if key_or_item in entry:
+ entry.pop(key_or_item)
+ return (True, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # pylint: disable=no-member,maybe-no-member
+ ind = None
+ try:
+ ind = entry.index(key_or_item)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ entry.pop(ind)
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ def delete(self, path):
+ ''' remove path from a dict'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ return (True, self.yaml_dict)
+
+ def exists(self, path, value):
+ ''' check if value exists at path'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, list):
+ if value in entry:
+ return True
+ return False
+
+ elif isinstance(entry, dict):
+ if isinstance(value, dict):
+ rval = False
+ for key, val in value.items():
+ if entry[key] != val:
+ rval = False
+ break
+ else:
+ rval = True
+ return rval
+
+ return value in entry
+
+ return entry == value
+
+ def append(self, path, value):
+ '''append value to a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ self.put(path, [])
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ if not isinstance(entry, list):
+ return (False, self.yaml_dict)
+
+ # pylint: disable=no-member,maybe-no-member
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # pylint: disable=too-many-arguments
+ def update(self, path, value, index=None, curr_value=None):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, dict):
+ # pylint: disable=no-member,maybe-no-member
+ if not isinstance(value, dict):
+ raise YeditException('Cannot replace key, value entry in ' +
+ 'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
+
+ entry.update(value)
+ return (True, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # pylint: disable=no-member,maybe-no-member
+ ind = None
+ if curr_value:
+ try:
+ ind = entry.index(curr_value)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ elif index is not None:
+ ind = index
+
+ if ind is not None and entry[ind] != value:
+ entry[ind] = value
+ return (True, self.yaml_dict)
+
+ # see if it exists in the list
+ try:
+ ind = entry.index(value)
+ except ValueError:
+ # doesn't exist, append it
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # already exists, return
+ if ind is not None:
+ return (False, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ def put(self, path, value):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry == value:
+ return (False, self.yaml_dict)
+
+ # deepcopy didn't work
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ tmp_copy.fa.set_block_style()
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ self.yaml_dict = tmp_copy
+
+ return (True, self.yaml_dict)
+
+ def create(self, path, value):
+ ''' create a yaml file '''
+ if not self.file_exists():
+ # deepcopy didn't work
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), # noqa: E501
+ yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ tmp_copy.fa.set_block_style()
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if result:
+ self.yaml_dict = tmp_copy
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ @staticmethod
+ def get_curr_value(invalue, val_type):
+ '''return the current value'''
+ if invalue is None:
+ return None
+
+ curr_value = invalue
+ if val_type == 'yaml':
+ curr_value = yaml.load(invalue)
+ elif val_type == 'json':
+ curr_value = json.loads(invalue)
+
+ return curr_value
+
+ @staticmethod
+ def parse_value(inc_value, vtype=''):
+ '''determine value type passed'''
+ true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
+ 'on', 'On', 'ON', ]
+ false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
+ 'off', 'Off', 'OFF']
+
+ # If the value came in as a string and a bool type was requested, make
+ # sure it matches one of the recognized boolean spellings above
+ if isinstance(inc_value, str) and 'bool' in vtype:
+ if inc_value not in true_bools and inc_value not in false_bools:
+ raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
+ % (inc_value, vtype))
+ elif isinstance(inc_value, bool) and 'str' in vtype:
+ inc_value = str(inc_value)
+
+ # If vtype is not str then go ahead and attempt to yaml load it.
+ if isinstance(inc_value, str) and 'str' not in vtype:
+ try:
+ inc_value = yaml.load(inc_value)
+ except Exception:
+ raise YeditException('Could not determine type of incoming ' +
+ 'value. value=[%s] vtype=[%s]'
+ % (type(inc_value), vtype))
+
+ return inc_value
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(module):
+ '''perform the idempotent crud operations'''
+ yamlfile = Yedit(filename=module.params['src'],
+ backup=module.params['backup'],
+ separator=module.params['separator'])
+
+ if module.params['src']:
+ rval = yamlfile.load()
+
+ if yamlfile.yaml_dict is None and \
+ module.params['state'] != 'present':
+ return {'failed': True,
+ 'msg': 'Error opening file [%s]. Verify that the ' % module.params['src'] +
+ 'file exists, that it has correct' +
+ ' permissions, and is valid yaml.'}
+
+ if module.params['state'] == 'list':
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['key']:
+ rval = yamlfile.get(module.params['key']) or {}
+
+ return {'changed': False, 'result': rval, 'state': "list"}
+
+ elif module.params['state'] == 'absent':
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['update']:
+ rval = yamlfile.pop(module.params['key'],
+ module.params['value'])
+ else:
+ rval = yamlfile.delete(module.params['key'])
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
+
+ elif module.params['state'] == 'present':
+ # check if content is different than what is in the file
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+
+ # We had no edits to make and the contents are the same
+ if yamlfile.yaml_dict == content and \
+ module.params['value'] is None:
+ return {'changed': False,
+ 'result': yamlfile.yaml_dict,
+ 'state': "present"}
+
+ yamlfile.yaml_dict = content
+
+ # we were passed a value; parse it
+ if module.params['value']:
+ value = Yedit.parse_value(module.params['value'],
+ module.params['value_type'])
+ key = module.params['key']
+ if module.params['update']:
+ # pylint: disable=line-too-long
+ curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']), # noqa: E501
+ module.params['curr_value_format']) # noqa: E501
+
+ rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
+
+ elif module.params['append']:
+ rval = yamlfile.append(key, value)
+ else:
+ rval = yamlfile.put(key, value)
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0],
+ 'result': rval[1], 'state': "present"}
+
+ # no edits to make
+ if module.params['src']:
+ # pylint: disable=redefined-variable-type
+ rval = yamlfile.write()
+ return {'changed': rval[0],
+ 'result': rval[1],
+ 'state': "present"}
+
+ return {'failed': True, 'msg': 'Unknown state passed'}
+
+
+# pylint: disable=too-many-branches
+def main():
+ ''' ansible module for modifying yaml files '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default='present', type='str',
+ choices=['present', 'absent', 'list']),
+ debug=dict(default=False, type='bool'),
+ src=dict(default=None, type='str'),
+ content=dict(default=None),
+ content_type=dict(default='dict', choices=['dict']),
+ key=dict(default='', type='str'),
+ value=dict(),
+ value_type=dict(default='', type='str'),
+ update=dict(default=False, type='bool'),
+ append=dict(default=False, type='bool'),
+ index=dict(default=None, type='int'),
+ curr_value=dict(default=None, type='str'),
+ curr_value_format=dict(default='yaml',
+ choices=['yaml', 'json', 'str'],
+ type='str'),
+ backup=dict(default=True, type='bool'),
+ separator=dict(default='.', type='str'),
+ ),
+ mutually_exclusive=[["curr_value", "index"], ['update', "append"]],
+ required_one_of=[["content", "src"]],
+ )
+
+ rval = Yedit.run_ansible(module)
+ if 'failed' in rval and rval['failed']:
+ module.fail_json(**rval)
+
+ module.exit_json(**rval)
+
+
+if __name__ == '__main__':
+ main()
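Outside of Ansible, the Yedit class above can also be driven directly from Python; a minimal sketch of the put/get/append round trip with a non-default separator (filename and keys are made up, and Yedit is assumed to be importable, e.g. from this generated library module):

# Hypothetical usage of Yedit from roles/lib_utils/library/yedit.py.
yed = Yedit(filename='/tmp/example.yml', separator='#', backup=False)

yed.put('a#b#c', 'd')              # (True, {'a': {'b': {'c': 'd'}}})
yed.get('a#b')                     # {'c': 'd'}
yed.append('a#items', 'first')     # creates the list, then appends to it
yed.write()                        # persists /tmp/example.yml

# Each mutating call returns a (changed, yaml_dict) tuple, which is what
# Yedit.run_ansible() uses to report 'changed' back to Ansible.
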
diff --git a/roles/lib_utils/src/ansible/yedit.py b/roles/lib_utils/src/ansible/yedit.py
new file mode 100644
index 000000000..8a1a7c2dc
--- /dev/null
+++ b/roles/lib_utils/src/ansible/yedit.py
@@ -0,0 +1,42 @@
+# flake8: noqa
+# pylint: skip-file
+
+
+# pylint: disable=too-many-branches
+def main():
+    ''' ansible yedit module for modifying yaml files '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default='present', type='str',
+ choices=['present', 'absent', 'list']),
+ debug=dict(default=False, type='bool'),
+ src=dict(default=None, type='str'),
+ content=dict(default=None),
+ content_type=dict(default='dict', choices=['dict']),
+ key=dict(default='', type='str'),
+ value=dict(),
+ value_type=dict(default='', type='str'),
+ update=dict(default=False, type='bool'),
+ append=dict(default=False, type='bool'),
+ index=dict(default=None, type='int'),
+ curr_value=dict(default=None, type='str'),
+ curr_value_format=dict(default='yaml',
+ choices=['yaml', 'json', 'str'],
+ type='str'),
+ backup=dict(default=True, type='bool'),
+ separator=dict(default='.', type='str'),
+ ),
+ mutually_exclusive=[["curr_value", "index"], ['update', "append"]],
+ required_one_of=[["content", "src"]],
+ )
+
+ rval = Yedit.run_ansible(module)
+ if 'failed' in rval and rval['failed']:
+ module.fail_json(**rval)
+
+ module.exit_json(**rval)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/lib_utils/src/class/import.py b/roles/lib_utils/src/class/import.py
new file mode 100644
index 000000000..249e07228
--- /dev/null
+++ b/roles/lib_utils/src/class/import.py
@@ -0,0 +1,11 @@
+# flake8: noqa
+# pylint: skip-file
+
+# pylint: disable=wrong-import-order
+import json
+import os
+import re
+# pylint: disable=import-error
+import ruamel.yaml as yaml
+import shutil
+from ansible.module_utils.basic import AnsibleModule
diff --git a/roles/lib_utils/src/class/yedit.py b/roles/lib_utils/src/class/yedit.py
new file mode 100644
index 000000000..b1644f9b2
--- /dev/null
+++ b/roles/lib_utils/src/class/yedit.py
@@ -0,0 +1,566 @@
+# flake8: noqa
+# pylint: skip-file
+# noqa: E301,E302
+
+
+class YeditException(Exception):
+ ''' Exception class for Yedit '''
+ pass
+
+
+# pylint: disable=too-many-public-methods
+class Yedit(object):
+ ''' Class to modify yaml files '''
+ re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
+ re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
+ com_sep = set(['.', '#', '|', ':'])
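+    # Keys are dotted paths such as 'a.b.c' or list indexes such as 'a.b[0]';
+    # the '%s' in the two patterns above is filled in with every separator from
+    # com_sep *except* the active one, so those characters may still appear
+    # inside key names.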
+
+ # pylint: disable=too-many-arguments
+ def __init__(self,
+ filename=None,
+ content=None,
+ content_type='yaml',
+ separator='.',
+ backup=False):
+ self.content = content
+ self._separator = separator
+ self.filename = filename
+ self.__yaml_dict = content
+ self.content_type = content_type
+ self.backup = backup
+ self.load(content_type=self.content_type)
+ if self.__yaml_dict is None:
+ self.__yaml_dict = {}
+
+ @property
+ def separator(self):
+        ''' getter method for separator '''
+        return self._separator
+
+    @separator.setter
+    def separator(self, value):
+        ''' setter method for separator '''
+        self._separator = value
+
+ @property
+ def yaml_dict(self):
+ ''' getter method for yaml_dict '''
+ return self.__yaml_dict
+
+ @yaml_dict.setter
+ def yaml_dict(self, value):
+ ''' setter method for yaml_dict '''
+ self.__yaml_dict = value
+
+ @staticmethod
+ def parse_key(key, sep='.'):
+ '''parse the key allowing the appropriate separator'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ return re.findall(Yedit.re_key % ''.join(common_separators), key)
+
+ @staticmethod
+ def valid_key(key, sep='.'):
+ '''validate the incoming key'''
+ common_separators = list(Yedit.com_sep - set([sep]))
+ if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
+ return False
+
+ return True
+
+ @staticmethod
+ def remove_entry(data, key, sep='.'):
+ ''' remove data at location key '''
+ if key == '' and isinstance(data, dict):
+ data.clear()
+ return True
+ elif key == '' and isinstance(data, list):
+ del data[:]
+ return True
+
+ if not (key and Yedit.valid_key(key, sep)) and \
+ isinstance(data, (list, dict)):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ # process last index for remove
+ # expected list entry
+ if key_indexes[-1][0]:
+ if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ del data[int(key_indexes[-1][0])]
+ return True
+
+ # expected dict entry
+ elif key_indexes[-1][1]:
+ if isinstance(data, dict):
+ del data[key_indexes[-1][1]]
+ return True
+
+ @staticmethod
+ def add_entry(data, key, item=None, sep='.'):
+        ''' Add an entry to a dictionary using key notation a.b.c
+            d = {'a': {'b': 'c'}}
+            key = 'a.b'
+            item = 'x'
+            result: d == {'a': {'b': 'x'}}
+        '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key:
+ if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
+ data = data[dict_key]
+ continue
+
+ elif data and not isinstance(data, dict):
+ return None
+
+ data[dict_key] = {}
+ data = data[dict_key]
+
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ if key == '':
+ data = item
+
+ # process last index for add
+ # expected list entry
+ elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
+ data[int(key_indexes[-1][0])] = item
+
+ # expected dict entry
+ elif key_indexes[-1][1] and isinstance(data, dict):
+ data[key_indexes[-1][1]] = item
+
+ return data
+
+ @staticmethod
+ def get_entry(data, key, sep='.'):
+ ''' Get an item from a dictionary with key notation a.b.c
+            d = {'a': {'b': 'c'}}
+ key = a.b
+ return c
+ '''
+ if key == '':
+ pass
+ elif (not (key and Yedit.valid_key(key, sep)) and
+ isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = Yedit.parse_key(key, sep)
+ for arr_ind, dict_key in key_indexes:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif (arr_ind and isinstance(data, list) and
+ int(arr_ind) <= len(data) - 1):
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ return data
+
+ def write(self):
+ ''' write to file '''
+ if not self.filename:
+ raise YeditException('Please specify a filename.')
+
+ if self.backup and self.file_exists():
+ shutil.copy(self.filename, self.filename + '.orig')
+
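+        # write to a temporary file first, then rename it over the target so
+        # a failed dump does not leave a truncated file behind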
+ tmp_filename = self.filename + '.yedit'
+ with open(tmp_filename, 'w') as yfd:
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ self.yaml_dict.fa.set_block_style()
+
+ yfd.write(yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
+
+ os.rename(tmp_filename, self.filename)
+
+ return (True, self.yaml_dict)
+
+ def read(self):
+ ''' read from file '''
+ # check if it exists
+ if self.filename is None or not self.file_exists():
+ return None
+
+ contents = None
+ with open(self.filename) as yfd:
+ contents = yfd.read()
+
+ return contents
+
+ def file_exists(self):
+ ''' return whether file exists '''
+ if os.path.exists(self.filename):
+ return True
+
+ return False
+
+ def load(self, content_type='yaml'):
+ ''' return yaml file '''
+ contents = self.read()
+
+ if not contents and not self.content:
+ return None
+
+ if self.content:
+ if isinstance(self.content, dict):
+ self.yaml_dict = self.content
+ return self.yaml_dict
+ elif isinstance(self.content, str):
+ contents = self.content
+
+ # check if it is yaml
+ try:
+ if content_type == 'yaml' and contents:
+ self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ self.yaml_dict.fa.set_block_style()
+ elif content_type == 'json' and contents:
+ self.yaml_dict = json.loads(contents)
+ except yaml.YAMLError as err:
+ # Error loading yaml or json
+ raise YeditException('Problem with loading yaml file. %s' % err)
+
+ return self.yaml_dict
+
+ def get(self, key):
+ ''' get a specified key'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
+ except KeyError:
+ entry = None
+
+ return entry
+
+ def pop(self, path, key_or_item):
+ ''' remove a key, value pair from a dict or an item for a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ if isinstance(entry, dict):
+ # pylint: disable=no-member,maybe-no-member
+ if key_or_item in entry:
+ entry.pop(key_or_item)
+ return (True, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # pylint: disable=no-member,maybe-no-member
+ ind = None
+ try:
+ ind = entry.index(key_or_item)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ entry.pop(ind)
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ def delete(self, path):
+ ''' remove path from a dict'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ return (False, self.yaml_dict)
+
+ result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ return (True, self.yaml_dict)
+
+ def exists(self, path, value):
+ ''' check if value exists at path'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, list):
+ if value in entry:
+ return True
+ return False
+
+ elif isinstance(entry, dict):
+ if isinstance(value, dict):
+ rval = False
+ for key, val in value.items():
+ if entry[key] != val:
+ rval = False
+ break
+ else:
+ rval = True
+ return rval
+
+ return value in entry
+
+ return entry == value
+
+ def append(self, path, value):
+ '''append value to a list'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry is None:
+ self.put(path, [])
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ if not isinstance(entry, list):
+ return (False, self.yaml_dict)
+
+ # pylint: disable=no-member,maybe-no-member
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # pylint: disable=too-many-arguments
+ def update(self, path, value, index=None, curr_value=None):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if isinstance(entry, dict):
+ # pylint: disable=no-member,maybe-no-member
+ if not isinstance(value, dict):
+ raise YeditException('Cannot replace key, value entry in ' +
+ 'dict with non-dict type. value=[%s] [%s]' % (value, type(value))) # noqa: E501
+
+ entry.update(value)
+ return (True, self.yaml_dict)
+
+ elif isinstance(entry, list):
+ # pylint: disable=no-member,maybe-no-member
+ ind = None
+ if curr_value:
+ try:
+ ind = entry.index(curr_value)
+ except ValueError:
+ return (False, self.yaml_dict)
+
+ elif index is not None:
+ ind = index
+
+ if ind is not None and entry[ind] != value:
+ entry[ind] = value
+ return (True, self.yaml_dict)
+
+ # see if it exists in the list
+ try:
+ ind = entry.index(value)
+ except ValueError:
+ # doesn't exist, append it
+ entry.append(value)
+ return (True, self.yaml_dict)
+
+ # already exists, return
+ if ind is not None:
+ return (False, self.yaml_dict)
+ return (False, self.yaml_dict)
+
+ def put(self, path, value):
+ ''' put path, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
+ except KeyError:
+ entry = None
+
+ if entry == value:
+ return (False, self.yaml_dict)
+
+        # deepcopy does not work here; copy via a yaml round-trip instead
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
+ default_flow_style=False),
+ yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ tmp_copy.fa.set_block_style()
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if not result:
+ return (False, self.yaml_dict)
+
+ self.yaml_dict = tmp_copy
+
+ return (True, self.yaml_dict)
+
+ def create(self, path, value):
+ ''' create a yaml file '''
+ if not self.file_exists():
+            # deepcopy does not work here; copy via a yaml round-trip instead
+ tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), # noqa: E501
+ yaml.RoundTripLoader)
+ # pylint: disable=no-member
+ if hasattr(self.yaml_dict, 'fa'):
+ tmp_copy.fa.set_block_style()
+ result = Yedit.add_entry(tmp_copy, path, value, self.separator)
+ if result:
+ self.yaml_dict = tmp_copy
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+ @staticmethod
+ def get_curr_value(invalue, val_type):
+ '''return the current value'''
+ if invalue is None:
+ return None
+
+ curr_value = invalue
+ if val_type == 'yaml':
+ curr_value = yaml.load(invalue)
+ elif val_type == 'json':
+ curr_value = json.loads(invalue)
+
+ return curr_value
+
+ @staticmethod
+ def parse_value(inc_value, vtype=''):
+ '''determine value type passed'''
+ true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
+ 'on', 'On', 'ON', ]
+ false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
+ 'off', 'Off', 'OFF']
+
+        # The value came in as a string with a 'bool' value_type: accept only
+        # the recognized true/false spellings above (the yaml.load below converts it)
+ if isinstance(inc_value, str) and 'bool' in vtype:
+ if inc_value not in true_bools and inc_value not in false_bools:
+ raise YeditException('Not a boolean type. str=[%s] vtype=[%s]'
+ % (inc_value, vtype))
+ elif isinstance(inc_value, bool) and 'str' in vtype:
+ inc_value = str(inc_value)
+
+ # If vtype is not str then go ahead and attempt to yaml load it.
+ if isinstance(inc_value, str) and 'str' not in vtype:
+ try:
+ inc_value = yaml.load(inc_value)
+ except Exception:
+ raise YeditException('Could not determine type of incoming ' +
+ 'value. value=[%s] vtype=[%s]'
+ % (type(inc_value), vtype))
+
+ return inc_value
+
+ # pylint: disable=too-many-return-statements,too-many-branches
+ @staticmethod
+ def run_ansible(module):
+ '''perform the idempotent crud operations'''
+ yamlfile = Yedit(filename=module.params['src'],
+ backup=module.params['backup'],
+ separator=module.params['separator'])
+
+ if module.params['src']:
+ rval = yamlfile.load()
+
+ if yamlfile.yaml_dict is None and \
+ module.params['state'] != 'present':
+ return {'failed': True,
+ 'msg': 'Error opening file [%s]. Verify that the ' +
+                    'file exists, that it has correct' +
+ ' permissions, and is valid yaml.'}
+
+ if module.params['state'] == 'list':
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['key']:
+ rval = yamlfile.get(module.params['key']) or {}
+
+ return {'changed': False, 'result': rval, 'state': "list"}
+
+ elif module.params['state'] == 'absent':
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+ yamlfile.yaml_dict = content
+
+ if module.params['update']:
+ rval = yamlfile.pop(module.params['key'],
+ module.params['value'])
+ else:
+ rval = yamlfile.delete(module.params['key'])
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0], 'result': rval[1], 'state': "absent"}
+
+ elif module.params['state'] == 'present':
+ # check if content is different than what is in the file
+ if module.params['content']:
+ content = Yedit.parse_value(module.params['content'],
+ module.params['content_type'])
+
+ # We had no edits to make and the contents are the same
+ if yamlfile.yaml_dict == content and \
+ module.params['value'] is None:
+ return {'changed': False,
+ 'result': yamlfile.yaml_dict,
+ 'state': "present"}
+
+ yamlfile.yaml_dict = content
+
+ # we were passed a value; parse it
+ if module.params['value']:
+ value = Yedit.parse_value(module.params['value'],
+ module.params['value_type'])
+ key = module.params['key']
+ if module.params['update']:
+ # pylint: disable=line-too-long
+ curr_value = Yedit.get_curr_value(Yedit.parse_value(module.params['curr_value']), # noqa: E501
+ module.params['curr_value_format']) # noqa: E501
+
+ rval = yamlfile.update(key, value, module.params['index'], curr_value) # noqa: E501
+
+ elif module.params['append']:
+ rval = yamlfile.append(key, value)
+ else:
+ rval = yamlfile.put(key, value)
+
+ if rval[0] and module.params['src']:
+ yamlfile.write()
+
+ return {'changed': rval[0],
+ 'result': rval[1], 'state': "present"}
+
+ # no edits to make
+ if module.params['src']:
+ # pylint: disable=redefined-variable-type
+ rval = yamlfile.write()
+ return {'changed': rval[0],
+ 'result': rval[1],
+ 'state': "present"}
+
+        return {'failed': True, 'msg': 'Unknown state passed'}
diff --git a/roles/lib_utils/src/doc/generated b/roles/lib_utils/src/doc/generated
new file mode 100644
index 000000000..054780313
--- /dev/null
+++ b/roles/lib_utils/src/doc/generated
@@ -0,0 +1,9 @@
+#!/usr/bin/env python
+# pylint: disable=missing-docstring
+# ___ ___ _ _ ___ ___ _ _____ ___ ___
+# / __| __| \| | __| _ \ /_\_ _| __| \
+# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
+# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
+# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
+# | |) | (_) | | .` | (_) || | | _|| |) | | | |
+# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
diff --git a/roles/lib_utils/src/doc/license b/roles/lib_utils/src/doc/license
new file mode 100644
index 000000000..717bb7f17
--- /dev/null
+++ b/roles/lib_utils/src/doc/license
@@ -0,0 +1,16 @@
+#
+# Copyright 2016 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
diff --git a/roles/lib_utils/src/doc/yedit b/roles/lib_utils/src/doc/yedit
new file mode 100644
index 000000000..16b44943e
--- /dev/null
+++ b/roles/lib_utils/src/doc/yedit
@@ -0,0 +1,138 @@
+# flake8: noqa
+# pylint: skip-file
+
+DOCUMENTATION = '''
+---
+module: yedit
+short_description: Create, modify, and idempotently manage yaml files.
+description:
+ - Modify yaml files programmatically.
+options:
+ state:
+ description:
+ - State represents whether to create, modify, delete, or list yaml
+ required: true
+ default: present
+ choices: ["present", "absent", "list"]
+ aliases: []
+ debug:
+ description:
+ - Turn on debug information.
+ required: false
+ default: false
+ aliases: []
+ src:
+ description:
+ - The file that is the target of the modifications.
+ required: false
+ default: None
+ aliases: []
+ content:
+ description:
+ - Content represents the yaml content you desire to work with. This
+    - could be the file contents to write or the in-memory data to modify.
+ required: false
+ default: None
+ aliases: []
+ content_type:
+ description:
+ - The python type of the content parameter.
+ required: false
+ default: 'dict'
+ aliases: []
+ key:
+ description:
+    - The path to the value you wish to modify. Empty string means the top of
+ - the document.
+ required: false
+ default: ''
+ aliases: []
+ value:
+ description:
+ - The incoming value of parameter 'key'.
+ required: false
+ default:
+ aliases: []
+ value_type:
+ description:
+ - The python type of the incoming value.
+ required: false
+ default: ''
+ aliases: []
+ update:
+ description:
+ - Whether the update should be performed on a dict/hash or list/array
+ - object.
+ required: false
+ default: false
+ aliases: []
+ append:
+ description:
+ - Whether to append to an array/list. When the key does not exist or is
+ - null, a new array is created. When the key is of a non-list type,
+ - nothing is done.
+ required: false
+ default: false
+ aliases: []
+ index:
+ description:
+ - Used in conjunction with the update parameter. This will update a
+ - specific index in an array/list.
+ required: false
+    default: None
+ aliases: []
+ curr_value:
+ description:
+ - Used in conjunction with the update parameter. This is the current
+ - value of 'key' in the yaml file.
+ required: false
+    default: None
+ aliases: []
+ curr_value_format:
+ description:
+ - Format of the incoming current value.
+ choices: ["yaml", "json", "str"]
+ required: false
+    default: 'yaml'
+ aliases: []
+ backup:
+ description:
+ - Whether to make a backup copy of the current file when performing an
+ - edit.
+ required: false
+ default: true
+ aliases: []
+ separator:
+ description:
+    - The separator used when splitting the key into its path components.
+ required: false
+ default: '.'
+ aliases: []
+author:
+- "Kenny Woodson <kwoodson@redhat.com>"
+extends_documentation_fragment: []
+'''
+
+EXAMPLES = '''
+# Simple insert of key, value
+- name: insert simple key, value
+ yedit:
+ src: somefile.yml
+ key: test
+ value: somevalue
+ state: present
+# Results:
+# test: somevalue
+
+# Multilevel insert of key, value
+- name: insert multilevel key, value
+  yedit:
+    src: somefile.yml
+    key: a#b#c
+    value: d
+    separator: '#'
+    state: present
+# Results:
+# a:
+# b:
+# c: d
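+
+# The examples below are illustrative sketches (file and key names are
+# hypothetical); they exercise the append and update options described above.
+
+# Append a value to a list, creating the list if the key does not exist
+- name: append an argument to a command list
+  yedit:
+    src: somefile.yml
+    key: spec.command
+    value: --loglevel=2
+    append: True
+    state: present
+
+# Replace an existing list entry in place using update + curr_value
+- name: replace an existing argument
+  yedit:
+    src: somefile.yml
+    key: spec.command
+    value: --loglevel=4
+    curr_value: --loglevel=2
+    curr_value_format: str
+    update: True
+    state: present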
+'''
diff --git a/roles/lib_utils/src/generate.py b/roles/lib_utils/src/generate.py
new file mode 100755
index 000000000..6daade108
--- /dev/null
+++ b/roles/lib_utils/src/generate.py
@@ -0,0 +1,75 @@
+#!/usr/bin/env python
+'''
+    Generate the openshift-ansible/roles/lib_utils/library/ modules.
+'''
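+#
+# Typical usage (a sketch):
+#   ./generate.py            regenerate the modules under ../library/
+#   ./generate.py --verify   fail if the generated code no longer matches ../library/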
+
+import argparse
+import os
+import yaml
+import six
+
+OPENSHIFT_ANSIBLE_PATH = os.path.dirname(os.path.realpath(__file__))
+OPENSHIFT_ANSIBLE_SOURCES_PATH = os.path.join(OPENSHIFT_ANSIBLE_PATH, 'sources.yml') # noqa: E501
+LIBRARY = os.path.join(OPENSHIFT_ANSIBLE_PATH, '..', 'library/')
+
+
+class GenerateAnsibleException(Exception):
+ '''General Exception for generate function'''
+ pass
+
+
+def parse_args():
+ '''parse arguments to generate'''
+ parser = argparse.ArgumentParser(description="Generate ansible modules.")
+ parser.add_argument('--verify', action='store_true', default=False,
+ help='Verify library code matches the generated code.')
+
+ return parser.parse_args()
+
+
+def generate(parts):
+ '''generate the source code for the ansible modules'''
+
+ data = six.StringIO()
+ for fpart in parts:
+ # first line is pylint disable so skip it
+ with open(os.path.join(OPENSHIFT_ANSIBLE_PATH, fpart)) as pfd:
+ for idx, line in enumerate(pfd):
+                if idx in [0, 1] and ('flake8: noqa' in line or 'pylint: skip-file' in line):  # noqa: E501
+ continue
+
+ data.write(line)
+
+ return data
+
+
+def get_sources():
+ '''return the path to the generate sources'''
+ return yaml.load(open(OPENSHIFT_ANSIBLE_SOURCES_PATH).read())
+
+
+def verify():
+ '''verify if the generated code matches the library code'''
+ for fname, parts in get_sources().items():
+ data = generate(parts)
+ fname = os.path.join(LIBRARY, fname)
+ if not open(fname).read() == data.getvalue():
+ raise GenerateAnsibleException('Generated content does not match for %s' % fname)
+
+
+def main():
+ ''' combine the necessary files to create the ansible module '''
+ args = parse_args()
+ if args.verify:
+ verify()
+
+ for fname, parts in get_sources().items():
+ data = generate(parts)
+ fname = os.path.join(LIBRARY, fname)
+ with open(fname, 'w') as afd:
+ afd.seek(0)
+ afd.write(data.getvalue())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/lib_utils/src/sources.yml b/roles/lib_utils/src/sources.yml
new file mode 100644
index 000000000..9cf3a0981
--- /dev/null
+++ b/roles/lib_utils/src/sources.yml
@@ -0,0 +1,8 @@
+---
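+# Map of generated module -> ordered list of source fragments; generate.py
+# concatenates the fragments in this order to build the module under
+# roles/lib_utils/library/.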
+yedit.py:
+- doc/generated
+- doc/license
+- class/import.py
+- doc/yedit
+- class/yedit.py
+- ansible/yedit.py
diff --git a/roles/lib_utils/src/test/integration/files/kube-manager.yaml b/roles/lib_utils/src/test/integration/files/kube-manager.yaml
new file mode 100644
index 000000000..6f4b9e6dc
--- /dev/null
+++ b/roles/lib_utils/src/test/integration/files/kube-manager.yaml
@@ -0,0 +1,39 @@
+---
+apiVersion: v1
+kind: Pod
+metadata:
+ name: kube-controller-manager
+ namespace: kube-system
+spec:
+ hostNetwork: true
+ containers:
+ - name: kube-controller-manager
+ image: openshift/kube:v1.0.0
+ command:
+ - /hyperkube
+ - controller-manager
+ - --master=http://127.0.0.1:8080
+ - --leader-elect=true
+ - --service-account-private-key-file=/etc/kubernetes/ssl/apiserver-key.pem
+ - --root-ca-file=/etc/kubernetes/ssl/ca.pem
+ livenessProbe:
+ httpGet:
+ host: 127.0.0.1
+ path: /healthz
+ port: 10252
+ initialDelaySeconds: 15
+ timeoutSeconds: 1
+ volumeMounts:
+ - mountPath: /etc/kubernetes/ssl
+ name: ssl-certs-kubernetes
+ readOnly: true
+ - mountPath: /etc/ssl/certs
+ name: ssl-certs-host
+ readOnly: true
+ volumes:
+ - hostPath:
+ path: /etc/kubernetes/ssl
+ name: ssl-certs-kubernetes
+ - hostPath:
+ path: /usr/share/ca-certificates
+ name: ssl-certs-host
diff --git a/roles/lib_utils/src/test/integration/kube-manager-test.yaml.orig b/roles/lib_utils/src/test/integration/kube-manager-test.yaml.orig
new file mode 100644
index 000000000..5541c3dae
--- /dev/null
+++ b/roles/lib_utils/src/test/integration/kube-manager-test.yaml.orig
@@ -0,0 +1,52 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ name: kube-controller-manager
+ namespace: kube-system
+spec:
+ hostNetwork: true
+ containers:
+ - name: kube-controller-manager
+ image: openshift/kube:v1.0.0
+ command:
+ - /hyperkube
+ - controller-manager
+ - --master=http://127.0.0.1:8080
+ - --leader-elect=true
+ - --service-account-private-key-file=/etc/kubernetes/ssl/apiserver-key.pem
+ - --root-ca-file=/etc/k8s/ssl/my.pem
+ - --my-new-parameter=openshift
+ livenessProbe:
+ httpGet:
+ host: 127.0.0.1
+ path: /healthz
+ port: 10252
+ initialDelaySeconds: 15
+ timeoutSeconds: 1
+ volumeMounts:
+ - mountPath: /etc/kubernetes/ssl
+ name: ssl-certs-kubernetes
+ readOnly: true
+ - mountPath: /etc/ssl/certs
+ name: ssl-certs-host
+ readOnly: 'true'
+ volumes:
+ - hostPath:
+ path: /etc/kubernetes/ssl
+ name: ssl-certs-kubernetes
+ - hostPath:
+ path: /usr/share/ca-certificates
+ name: ssl-certs-host
+yedittest: yedittest
+metadata-namespace: openshift-is-awesome
+nonexistingkey:
+- --my-new-parameter=openshift
+a:
+ b:
+ c: d
+e:
+ f:
+ g:
+ h:
+ i:
+ j: k
diff --git a/roles/lib_utils/src/test/integration/yedit_test.yml b/roles/lib_utils/src/test/integration/yedit_test.yml
new file mode 100755
index 000000000..1760a7466
--- /dev/null
+++ b/roles/lib_utils/src/test/integration/yedit_test.yml
@@ -0,0 +1,221 @@
+#!/usr/bin/ansible-playbook
+# Yedit test so that we can quickly determine if features are working
+# Ensure that the kube-manager.yaml file exists
+#
+# ./yedit_test.yml -M ../../library
+#
+---
+- hosts: localhost
+ gather_facts: no
+ vars:
+ test_file: kube-manager-test.yaml
+ test: test
+ strategy: debug
+
+ post_tasks:
+ - name: copy the kube-manager.yaml file so that we have a pristine copy each time
+ copy:
+ src: kube-manager.yaml
+ dest: "./{{ test_file }}"
+ changed_when: False
+
+ ####### add key to top level #####
+ - name: add a key at the top level
+ yedit:
+ src: "{{ test_file }}"
+ key: yedittest
+ value: yedittest
+
+ - name: retrieve the inserted key
+ yedit:
+ src: "{{ test_file }}"
+ state: list
+ key: yedittest
+ register: results
+
+ - name: Assert that key is at top level
+ assert:
+ that: results.result == 'yedittest'
+ msg: 'Test: add a key to top level failed. yedittest != [{{ results.result }}]'
+ ###### end add key to top level #####
+
+ ###### modify multilevel key, value #####
+ - name: modify multilevel key, value
+ yedit:
+ src: "{{ test_file }}"
+ key: metadata-namespace
+ value: openshift-is-awesome
+ separator: '-'
+
+ - name: retrieve the inserted key
+ yedit:
+ src: "{{ test_file }}"
+ state: list
+ key: metadata-namespace
+ separator: '-'
+ register: results
+
+ - name: Assert that key is as expected
+ assert:
+ that: results.result == 'openshift-is-awesome'
+ msg: 'Test: multilevel key, value modification: openshift-is-awesome != [{{ results.result }}]'
+ ###### end modify multilevel key, value #####
+
+ ###### test a string boolean #####
+ - name: test a string boolean
+ yedit:
+ src: "{{ test_file }}"
+ key: spec.containers[0].volumeMounts[1].readOnly
+ value: 'true'
+ value_type: str
+
+ - name: retrieve the inserted key
+ yedit:
+ src: "{{ test_file }}"
+ state: list
+ key: spec.containers[0].volumeMounts[1].readOnly
+ register: results
+
+ - name: Assert that key is a string
+ assert:
+ that: results.result == "true"
+ msg: "Test: boolean str: 'true' != [{{ results.result }}]"
+
+ - name: Assert that key is not bool
+ assert:
+ that: results.result != true
+ msg: "Test: boolean str: true != [{{ results.result }}]"
+ ###### end test boolean string #####
+
+ ###### test array append #####
+ - name: test array append
+ yedit:
+ src: "{{ test_file }}"
+ key: spec.containers[0].command
+ value: --my-new-parameter=openshift
+ append: True
+
+ - name: retrieve the array
+ yedit:
+ src: "{{ test_file }}"
+ state: list
+ key: spec.containers[0].command
+ register: results
+
+ - name: Assert that the last element in array is our value
+ assert:
+ that: results.result[-1] == "--my-new-parameter=openshift"
+ msg: "Test: '--my-new-parameter=openshift' != [{{ results.result[-1] }}]"
+ ###### end test array append #####
+
+ ###### test non-existing array append #####
+ - name: test array append to non-existing key
+ yedit:
+ src: "{{ test_file }}"
+ key: nonexistingkey
+ value: --my-new-parameter=openshift
+ append: True
+
+ - name: retrieve the array
+ yedit:
+ src: "{{ test_file }}"
+ state: list
+ key: nonexistingkey
+ register: results
+
+ - name: Assert that the last element in array is our value
+ assert:
+ that: results.result[-1] == "--my-new-parameter=openshift"
+ msg: "Test: '--my-new-parameter=openshift' != [{{ results.result[-1] }}]"
+ ###### end test non-existing array append #####
+
+ ###### test array update modify #####
+ - name: test array update modify
+ yedit:
+ src: "{{ test_file }}"
+ key: spec.containers[0].command
+ value: --root-ca-file=/etc/k8s/ssl/my.pem
+ curr_value: --root-ca-file=/etc/kubernetes/ssl/ca.pem
+ curr_value_format: str
+ update: True
+
+ - name: retrieve the array
+ yedit:
+ src: "{{ test_file }}"
+ state: list
+ key: spec.containers[0].command
+ register: results
+
+ - name: Assert that the element in array is our value
+ assert:
+ that: results.result[5] == "--root-ca-file=/etc/k8s/ssl/my.pem"
+ msg: "Test: '--root-ca-file=/etc/k8s/ssl/my.pem' != [{{ results.result[5] }}]"
+ ###### end test array update modify#####
+
+ ###### test dict create #####
+ - name: test dict create
+ yedit:
+ src: "{{ test_file }}"
+ key: a.b.c
+ value: d
+
+ - name: retrieve the key
+ yedit:
+ src: "{{ test_file }}"
+ state: list
+ key: a.b.c
+ register: results
+
+ - name: Assert that the key was created
+ assert:
+ that: results.result == "d"
+ msg: "Test: 'd' != [{{ results.result }}]"
+ ###### end test dict create #####
+
+ ###### test create dict value #####
+ - name: test create dict value
+ yedit:
+ src: "{{ test_file }}"
+ key: e.f.g
+ value:
+ h:
+ i:
+ j: k
+
+ - name: retrieve the key
+ yedit:
+ src: "{{ test_file }}"
+ state: list
+ key: e.f.g.h.i.j
+ register: results
+
+ - name: Assert that the key was created
+ assert:
+ that: results.result == "k"
+ msg: "Test: 'k' != [{{ results.result }}]"
+ ###### end test dict create #####
+
+ ###### test create list value #####
+ - name: test create list value
+ yedit:
+ src: "{{ test_file }}"
+ key: z.x.y
+ value:
+ - 1
+ - 2
+ - 3
+
+ - name: retrieve the key
+ yedit:
+ src: "{{ test_file }}"
+ state: list
+ key: z#x#y
+ separator: '#'
+ register: results
+ - debug: var=results
+
+ - name: Assert that the key was created
+ assert:
+ that: results.result == [1, 2, 3]
+ msg: "Test: '[1, 2, 3]' != [{{ results.result }}]"
+###### end test create list value #####
diff --git a/roles/lib_utils/src/test/unit/yedit_test.py b/roles/lib_utils/src/test/unit/yedit_test.py
new file mode 100755
index 000000000..2793c5c1a
--- /dev/null
+++ b/roles/lib_utils/src/test/unit/yedit_test.py
@@ -0,0 +1,277 @@
+#!/usr/bin/env python2
+'''
+ Unit tests for yedit
+'''
+# To run
+# python -m unittest yedit_test
+#
+# .............................
+# ----------------------------------------------------------------------
+# Ran 29 tests in 0.133s
+# OK
+
+import os
+import sys
+import unittest
+
+# Removing invalid variable names for tests so that I can
+# keep them brief
+# pylint: disable=invalid-name,no-name-in-module
+# Disable import-error b/c our libraries aren't loaded in jenkins
+# pylint: disable=import-error
+# place yedit in our path
+yedit_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
+sys.path.insert(0, yedit_path)
+
+from yedit import Yedit # noqa: E402
+
+# pylint: disable=too-many-public-methods
+# Silly pylint, moar tests!
+
+
+class YeditTest(unittest.TestCase):
+ '''
+ Test class for yedit
+ '''
+ data = {'a': 'a',
+ 'b': {'c': {'d': [{'e': 'x'}, 'f', 'g']}},
+ } # noqa: E124
+
+ filename = 'yedit_test.yml'
+
+ def setUp(self):
+ ''' setup method will create a file and set to known configuration '''
+ yed = Yedit(YeditTest.filename)
+ yed.yaml_dict = YeditTest.data
+ yed.write()
+
+ def test_load(self):
+ ''' Testing a get '''
+ yed = Yedit('yedit_test.yml')
+ self.assertEqual(yed.yaml_dict, self.data)
+
+ def test_write(self):
+ ''' Testing a simple write '''
+ yed = Yedit('yedit_test.yml')
+ yed.put('key1', 1)
+ yed.write()
+ self.assertTrue('key1' in yed.yaml_dict)
+ self.assertEqual(yed.yaml_dict['key1'], 1)
+
+ def test_write_x_y_z(self):
+ '''Testing a write of multilayer key'''
+ yed = Yedit('yedit_test.yml')
+ yed.put('x.y.z', 'modified')
+ yed.write()
+ yed.load()
+ self.assertEqual(yed.get('x.y.z'), 'modified')
+
+ def test_delete_a(self):
+ '''Testing a simple delete '''
+ yed = Yedit('yedit_test.yml')
+ yed.delete('a')
+ yed.write()
+ yed.load()
+ self.assertTrue('a' not in yed.yaml_dict)
+
+ def test_delete_b_c(self):
+ '''Testing delete of layered key '''
+ yed = Yedit('yedit_test.yml', separator=':')
+ yed.delete('b:c')
+ yed.write()
+ yed.load()
+ self.assertTrue('b' in yed.yaml_dict)
+ self.assertFalse('c' in yed.yaml_dict['b'])
+
+ def test_create(self):
+ '''Testing a create '''
+ os.unlink(YeditTest.filename)
+ yed = Yedit('yedit_test.yml')
+ yed.create('foo', 'bar')
+ yed.write()
+ yed.load()
+ self.assertTrue('foo' in yed.yaml_dict)
+ self.assertTrue(yed.yaml_dict['foo'] == 'bar')
+
+ def test_create_content(self):
+ '''Testing a create with content '''
+ content = {"foo": "bar"}
+ yed = Yedit("yedit_test.yml", content)
+ yed.write()
+ yed.load()
+ self.assertTrue('foo' in yed.yaml_dict)
+        self.assertEqual(yed.yaml_dict['foo'], 'bar')
+
+ def test_array_insert(self):
+        '''Testing an insert into an existing array'''
+ yed = Yedit("yedit_test.yml", separator=':')
+ yed.put('b:c:d[0]', 'inject')
+ self.assertTrue(yed.get('b:c:d[0]') == 'inject')
+
+ def test_array_insert_first_index(self):
+        '''Testing that an array insert preserves the following element'''
+ yed = Yedit("yedit_test.yml", separator=':')
+ yed.put('b:c:d[0]', 'inject')
+ self.assertTrue(yed.get('b:c:d[1]') == 'f')
+
+ def test_array_insert_second_index(self):
+        '''Testing that an array insert preserves later elements'''
+ yed = Yedit("yedit_test.yml", separator=':')
+ yed.put('b:c:d[0]', 'inject')
+ self.assertTrue(yed.get('b:c:d[2]') == 'g')
+
+ def test_dict_array_dict_access(self):
+        '''Testing access of a dict nested inside an array'''
+ yed = Yedit("yedit_test.yml", separator=':')
+ yed.put('b:c:d[0]', [{'x': {'y': 'inject'}}])
+ self.assertTrue(yed.get('b:c:d[0]:[0]:x:y') == 'inject')
+
+ def test_dict_array_dict_replace(self):
+        '''Testing replacement of a value nested in dicts and arrays'''
+ yed = Yedit("yedit_test.yml", separator=':')
+ yed.put('b:c:d[0]', [{'x': {'y': 'inject'}}])
+ yed.put('b:c:d[0]:[0]:x:y', 'testing')
+ self.assertTrue('b' in yed.yaml_dict)
+ self.assertTrue('c' in yed.yaml_dict['b'])
+ self.assertTrue('d' in yed.yaml_dict['b']['c'])
+ self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'], list))
+ self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'][0], list))
+ self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'][0][0], dict))
+ self.assertTrue('y' in yed.yaml_dict['b']['c']['d'][0][0]['x'])
+ self.assertTrue(yed.yaml_dict['b']['c']['d'][0][0]['x']['y'] == 'testing') # noqa: E501
+
+ def test_dict_array_dict_remove(self):
+ '''Testing multilevel delete'''
+ yed = Yedit("yedit_test.yml", separator=':')
+ yed.put('b:c:d[0]', [{'x': {'y': 'inject'}}])
+ yed.delete('b:c:d[0]:[0]:x:y')
+ self.assertTrue('b' in yed.yaml_dict)
+ self.assertTrue('c' in yed.yaml_dict['b'])
+ self.assertTrue('d' in yed.yaml_dict['b']['c'])
+ self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'], list))
+ self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'][0], list))
+ self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'][0][0], dict))
+ self.assertFalse('y' in yed.yaml_dict['b']['c']['d'][0][0]['x'])
+
+ def test_key_exists_in_dict(self):
+ '''Testing exist in dict'''
+ yed = Yedit("yedit_test.yml", separator=':')
+ yed.put('b:c:d[0]', [{'x': {'y': 'inject'}}])
+ self.assertTrue(yed.exists('b:c', 'd'))
+
+ def test_key_exists_in_list(self):
+ '''Testing exist in list'''
+ yed = Yedit("yedit_test.yml", separator=':')
+ yed.put('b:c:d[0]', [{'x': {'y': 'inject'}}])
+ self.assertTrue(yed.exists('b:c:d', [{'x': {'y': 'inject'}}]))
+ self.assertFalse(yed.exists('b:c:d', [{'x': {'y': 'test'}}]))
+
+ def test_update_to_list_with_index(self):
+ '''Testing update to list with index'''
+ yed = Yedit("yedit_test.yml", separator=':')
+ yed.put('x:y:z', [1, 2, 3])
+ yed.update('x:y:z', [5, 6], index=2)
+ self.assertTrue(yed.get('x:y:z') == [1, 2, [5, 6]])
+ self.assertTrue(yed.exists('x:y:z', [5, 6]))
+ self.assertFalse(yed.exists('x:y:z', 4))
+
+ def test_update_to_list_with_curr_value(self):
+ '''Testing update to list with index'''
+ yed = Yedit("yedit_test.yml", separator=':')
+ yed.put('x:y:z', [1, 2, 3])
+ yed.update('x:y:z', [5, 6], curr_value=3)
+ self.assertTrue(yed.get('x:y:z') == [1, 2, [5, 6]])
+ self.assertTrue(yed.exists('x:y:z', [5, 6]))
+ self.assertFalse(yed.exists('x:y:z', 4))
+
+ def test_update_to_list(self):
+ '''Testing update to list'''
+ yed = Yedit("yedit_test.yml", separator=':')
+ yed.put('x:y:z', [1, 2, 3])
+ yed.update('x:y:z', [5, 6])
+ self.assertTrue(yed.get('x:y:z') == [1, 2, 3, [5, 6]])
+ self.assertTrue(yed.exists('x:y:z', [5, 6]))
+ self.assertFalse(yed.exists('x:y:z', 4))
+
+ def test_append_twice_to_list(self):
+ '''Testing append to list'''
+ yed = Yedit("yedit_test.yml", separator=':')
+ yed.put('x:y:z', [1, 2, 3])
+ yed.append('x:y:z', [5, 6])
+ yed.append('x:y:z', [5, 6])
+ self.assertTrue(yed.get('x:y:z') == [1, 2, 3, [5, 6], [5, 6]])
+ self.assertTrue(2 == yed.get('x:y:z').count([5, 6]))
+ self.assertFalse(yed.exists('x:y:z', 4))
+
+ def test_add_item_to_dict(self):
+ '''Testing update to dict'''
+ yed = Yedit("yedit_test.yml", separator=':')
+ yed.put('x:y:z', {'a': 1, 'b': 2})
+ yed.update('x:y:z', {'c': 3, 'd': 4})
+ self.assertTrue(yed.get('x:y:z') == {'a': 1, 'b': 2, 'c': 3, 'd': 4})
+ self.assertTrue(yed.exists('x:y:z', {'c': 3}))
+
+ def test_first_level_dict_with_none_value(self):
+ '''test dict value with none value'''
+ yed = Yedit(content={'a': None}, separator=":")
+ yed.put('a:b:c', 'test')
+ self.assertTrue(yed.get('a:b:c') == 'test')
+        self.assertEqual(yed.get('a:b'), {'c': 'test'})
+
+ def test_adding_yaml_variable(self):
+        '''test putting a jinja-style variable string as a value'''
+ yed = Yedit("yedit_test.yml", separator=':')
+ yed.put('z:y', '{{test}}')
+ self.assertTrue(yed.get('z:y') == '{{test}}')
+
+ def test_keys_with_underscore(self):
+        '''test keys that contain underscores'''
+ yed = Yedit("yedit_test.yml", separator=':')
+ yed.put('z_:y_y', {'test': '{{test}}'})
+ self.assertTrue(yed.get('z_:y_y') == {'test': '{{test}}'})
+
+ def test_first_level_array_update(self):
+ '''test update on top level array'''
+ yed = Yedit(content=[{'a': 1}, {'b': 2}, {'b': 3}], separator=':')
+ yed.update('', {'c': 4})
+ self.assertTrue({'c': 4} in yed.get(''))
+
+ def test_first_level_array_delete(self):
+ '''test remove top level key'''
+ yed = Yedit(content=[{'a': 1}, {'b': 2}, {'b': 3}])
+ yed.delete('')
+ self.assertTrue({'b': 3} not in yed.get(''))
+
+ def test_first_level_array_get(self):
+        '''test get on a top-level array'''
+ yed = Yedit(content=[{'a': 1}, {'b': 2}, {'b': 3}])
+ yed.get('')
+ self.assertTrue([{'a': 1}, {'b': 2}, {'b': 3}] == yed.yaml_dict)
+
+ def test_pop_list_item(self):
+        '''test popping an item from a top-level array'''
+ yed = Yedit(content=[{'a': 1}, {'b': 2}, {'b': 3}], separator=':')
+ yed.pop('', {'b': 2})
+ self.assertTrue([{'a': 1}, {'b': 3}] == yed.yaml_dict)
+
+ def test_pop_list_item_2(self):
+        '''test popping an item from a top-level array by value'''
+        z = list(range(10))
+ yed = Yedit(content=z, separator=':')
+ yed.pop('', 5)
+ z.pop(5)
+ self.assertTrue(z == yed.yaml_dict)
+
+ def test_pop_dict_key(self):
+        '''test popping a key from a nested dict'''
+ yed = Yedit(content={'a': {'b': {'c': 1, 'd': 2}}}, separator='#')
+ yed.pop('a#b', 'c')
+ self.assertTrue({'a': {'b': {'d': 2}}} == yed.yaml_dict)
+
+ def tearDown(self):
+ '''TearDown method'''
+ os.unlink(YeditTest.filename)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/roles/openshift_builddefaults/tasks/main.yml b/roles/openshift_builddefaults/tasks/main.yml
index 1f44b29b9..e0b51eee0 100644
--- a/roles/openshift_builddefaults/tasks/main.yml
+++ b/roles/openshift_builddefaults/tasks/main.yml
@@ -15,6 +15,7 @@
no_proxy: "{{ openshift_builddefaults_no_proxy | default(None) }}"
git_http_proxy: "{{ openshift_builddefaults_git_http_proxy | default(None) }}"
git_https_proxy: "{{ openshift_builddefaults_git_https_proxy | default(None) }}"
+ git_no_proxy: "{{ openshift_builddefaults_git_no_proxy | default(None) }}"
- name: Set builddefaults config structure
openshift_facts:
diff --git a/roles/openshift_builddefaults/vars/main.yml b/roles/openshift_builddefaults/vars/main.yml
index bcdf68112..fe6069ea9 100644
--- a/roles/openshift_builddefaults/vars/main.yml
+++ b/roles/openshift_builddefaults/vars/main.yml
@@ -6,16 +6,27 @@ builddefaults_yaml:
kind: BuildDefaultsConfig
gitHTTPProxy: "{{ openshift.builddefaults.git_http_proxy | default('', true) }}"
gitHTTPSProxy: "{{ openshift.builddefaults.git_https_proxy | default('', true) }}"
+ gitNoProxy: "{{ openshift.builddefaults.git_no_proxy | default('', true) }}"
env:
- name: HTTP_PROXY
value: "{{ openshift.builddefaults.http_proxy | default('', true) }}"
- name: HTTPS_PROXY
value: "{{ openshift.builddefaults.https_proxy | default('', true) }}"
- name: NO_PROXY
- value: "{{ openshift.builddefaults.no_proxy | default('', true) | join(',') }}"
+ value: "{{ openshift.builddefaults.no_proxy | default('', true) }}"
- name: http_proxy
value: "{{ openshift.builddefaults.http_proxy | default('', true) }}"
- name: https_proxy
value: "{{ openshift.builddefaults.https_proxy | default('', true) }}"
- name: no_proxy
- value: "{{ openshift.builddefaults.no_proxy | default('', true) | join(',') }}"
+ value: "{{ openshift.builddefaults.no_proxy | default('', true) }}"
+ imageLabels: "{{ openshift_builddefaults_image_labels | default(None) }}"
+ nodeSelector: "{{ openshift_builddefaults_nodeselectors | default(None) }}"
+ annotations: "{{ openshift_builddefaults_annotations | default(None) }}"
+ resources:
+ requests:
+ cpu: "{{ openshift_builddefaults_resources_requests_cpu | default(None) }}"
+ memory: "{{ openshift_builddefaults_resources_requests_memory | default(None) }}"
+ limits:
+ cpu: "{{ openshift_builddefaults_resources_limits_cpu | default(None) }}"
+ memory: "{{ openshift_builddefaults_resources_limits_memory | default(None) }}"
diff --git a/roles/openshift_buildoverrides/meta/main.yml b/roles/openshift_buildoverrides/meta/main.yml
new file mode 100644
index 000000000..e9d2e8712
--- /dev/null
+++ b/roles/openshift_buildoverrides/meta/main.yml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+ author: Ben Parees
+ description: OpenShift Build Overrides configuration
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 1.9
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- role: openshift_facts
diff --git a/roles/openshift_buildoverrides/tasks/main.yml b/roles/openshift_buildoverrides/tasks/main.yml
new file mode 100644
index 000000000..87d0e6f21
--- /dev/null
+++ b/roles/openshift_buildoverrides/tasks/main.yml
@@ -0,0 +1,6 @@
+---
+- name: Set buildoverrides config structure
+ openshift_facts:
+ role: buildoverrides
+ local_facts:
+ config: "{{ openshift_buildoverrides_json | default(buildoverrides_yaml) }}"
diff --git a/roles/openshift_buildoverrides/vars/main.yml b/roles/openshift_buildoverrides/vars/main.yml
new file mode 100644
index 000000000..cf49a6ebf
--- /dev/null
+++ b/roles/openshift_buildoverrides/vars/main.yml
@@ -0,0 +1,11 @@
+---
+force_pull: "{{ openshift_buildoverrides_force_pull | default('') }}"
+buildoverrides_yaml:
+ BuildOverrides:
+ configuration:
+ apiVersion: v1
+ kind: BuildOverridesConfig
+ forcePull: "{{ '' if force_pull == '' else force_pull | bool }}"
+ imageLabels: "{{ openshift_buildoverrides_image_labels | default(None) }}"
+ nodeSelector: "{{ openshift_buildoverrides_nodeselectors | default(None) }}"
+ annotations: "{{ openshift_buildoverrides_annotations | default(None) }}"
diff --git a/roles/openshift_ca/tasks/main.yml b/roles/openshift_ca/tasks/main.yml
index e2a12e5ff..e21397170 100644
--- a/roles/openshift_ca/tasks/main.yml
+++ b/roles/openshift_ca/tasks/main.yml
@@ -86,7 +86,7 @@
{% for named_ca_certificate in openshift.master.named_certificates | default([]) | oo_collect('cafile') %}
--certificate-authority {{ named_ca_certificate }}
{% endfor %}
- --hostnames={{ openshift_master_hostnames | join(',') }}
+ --hostnames={{ openshift.common.all_hostnames | join(',') }}
--master={{ openshift.master.api_url }}
--public-master={{ openshift.master.public_api_url }}
--cert-dir={{ openshift_ca_config_dir }}
diff --git a/roles/openshift_certificate_expiry/README.md b/roles/openshift_certificate_expiry/README.md
index d44438332..327cc004b 100644
--- a/roles/openshift_certificate_expiry/README.md
+++ b/roles/openshift_certificate_expiry/README.md
@@ -1,5 +1,4 @@
-OpenShift Certificate Expiration Checker
-========================================
+# OpenShift Certificate Expiration Checker
OpenShift certificate expiration checking. Be warned of certificates
expiring within a configurable window of days, and notified of
@@ -9,7 +8,7 @@ include:
* Master/Node Service Certificates
* Router/Registry Service Certificates from etcd secrets
* Master/Node/Router/Registry/Admin `kubeconfig`s
-* Etcd certificates
+* Etcd certificates (including embedded)
This role pairs well with the redeploy certificates playbook:
@@ -21,8 +20,7 @@ cluster. For best results run `ansible-playbook` with the `-v` option.
-Role Variables
---------------
+# Role Variables
Core variables in this role:
@@ -42,8 +40,64 @@ Optional report/result saving variables in this role:
| `openshift_certificate_expiry_json_results_path` | `/tmp/cert-expiry-report.json` | The full path to save the json report as |
-Example Playbook
-----------------
+# Using this Role
+
+How to use the Certificate Expiration Checking Role.
+
+> **NOTE:** In the examples shown below, ensure you change **HOSTS**
+> to the path of your inventory file.
+
+## Run with ansible-playbook
+
+Run one of the example playbooks using an inventory file
+representative of your existing cluster. Some example playbooks are
+included in this repo, or you can read on below after this example to
+craft your own.
+
+```
+$ ansible-playbook -v -i HOSTS ./roles/openshift_certificate_expiry/examples/playbooks/easy-mode.yaml
+```
+
+Using the `easy-mode.yaml` playbook will produce:
+
+* Reports including healthy and unhealthy hosts
+* A JSON report in `/tmp/`
+* A stylized HTML report in `/tmp/`
+
+
+## More Example Playbooks
+
+> **Note:** These Playbooks are available to run directly out of the
+> [examples/playbooks/](examples/playbooks/) directory.
+
+
+This example playbook is great if you just want to **try the role
+out**. It enables HTML and JSON reports, and the warning window is set
+very large so you will almost always get results back. All
+certificates (healthy or not) are included in the results:
+
+```yaml
+---
+- name: Check cert expiries
+ hosts: nodes:masters:etcd
+ become: yes
+ gather_facts: no
+ vars:
+ openshift_certificate_expiry_warning_days: 1500
+ openshift_certificate_expiry_save_json_results: yes
+ openshift_certificate_expiry_generate_html_report: yes
+ openshift_certificate_expiry_show_all: yes
+ roles:
+ - role: openshift_certificate_expiry
+```
+
+```
+$ ansible-playbook -v -i HOSTS ./roles/openshift_certificate_expiry/examples/playbooks/easy-mode.yaml
+```
+
+> [View This Playbook](examples/playbooks/easy-mode.yaml)
+
+***
Default behavior:
@@ -57,6 +111,16 @@ Default behavior:
- role: openshift_certificate_expiry
```
+```
+$ ansible-playbook -v -i HOSTS ./roles/openshift_certificate_expiry/examples/playbooks/default.yaml
+```
+
+
+> [View This Playbook](examples/playbooks/default.yaml)
+
+***
+
+
Generate HTML and JSON artifacts in their default paths:
```yaml
@@ -72,6 +136,15 @@ Generate HTML and JSON artifacts in their default paths:
- role: openshift_certificate_expiry
```
+```
+$ ansible-playbook -v -i HOSTS ./roles/openshift_certificate_expiry/examples/playbooks/html_and_json_default_paths.yaml
+```
+
+
+> [View This Playbook](examples/playbooks/html_and_json_default_paths.yaml)
+
+***
+
Change the expiration warning window to 1500 days (good for testing
the module out):
@@ -87,6 +160,15 @@ the module out):
- role: openshift_certificate_expiry
```
+```
+$ ansible-playbook -v -i HOSTS ./roles/openshift_certificate_expiry/examples/playbooks/longer_warning_period.yaml
+```
+
+
+> [View This Playbook](examples/playbooks/longer_warning_period.yaml)
+
+***
+
Change the expiration warning window to 1500 days (good for testing
the module out) and save the results as a JSON file:
@@ -103,98 +185,157 @@ the module out) and save the results as a JSON file:
- role: openshift_certificate_expiry
```
+```
+$ ansible-playbook -v -i HOSTS ./roles/openshift_certificate_expiry/examples/playbooks/longer-warning-period-json-results.yaml
+```
+
+
+> [View This Playbook](examples/playbooks/longer-warning-period-json-results.yaml)
+
+
-JSON Output
------------
+# Output Formats
+
+As noted above, there are two ways to format your check report: as
+`json` for machine parsing, or as a stylized `html` page for easy
+skimming. Both options are shown below.
+
+## HTML Report
+
+![HTML Expiration Report](examples/cert-expiry-report-html.png)
+
+For an example of the HTML report you can browse, save
+[examples/cert-expiry-report.html](examples/cert-expiry-report.html)
+and then open the file in your browser.
+
+
+## JSON Report
There are two top-level keys in the saved JSON results, `data` and
`summary`.
The `data` key is a hash where the keys are the names of each host
-examined and the values are the check results for each respective
-host.
+examined and the values are the check results for the certificates
+identified on each respective host.
+
+The `summary` key is a hash that summarizes the total number of
+certificates:
+
+* examined on the entire cluster
+* OK
+* expiring within the configured warning window
+* already expired
-The `summary` key is a hash that summarizes the number of certificates
-expiring within the configured warning window and the number of
-already expired certificates.
+For an example of the full JSON report, see [examples/cert-expiry-report.json](examples/cert-expiry-report.json).
-The example below is abbreviated to save space:
+The example below is abbreviated to save space.
```json
{
- "data": {
- "192.168.124.148": {
- "etcd": [
- {
- "cert_cn": "CN:etcd-signer@1474563722",
- "days_remaining": 350,
- "expiry": "2017-09-22 17:02:25",
- "health": "warning",
- "path": "/etc/etcd/ca.crt"
- },
- ],
- "kubeconfigs": [
- {
- "cert_cn": "O:system:nodes, CN:system:node:m01.example.com",
- "days_remaining": 715,
- "expiry": "2018-09-22 17:08:57",
- "health": "warning",
- "path": "/etc/origin/node/system:node:m01.example.com.kubeconfig"
- },
- {
- "cert_cn": "O:system:cluster-admins, CN:system:admin",
- "days_remaining": 715,
- "expiry": "2018-09-22 17:04:40",
- "health": "warning",
- "path": "/etc/origin/master/admin.kubeconfig"
- }
- ],
- "meta": {
- "checked_at_time": "2016-10-07 15:26:47.608192",
- "show_all": "True",
- "warn_before_date": "2020-11-15 15:26:47.608192",
- "warning_days": 1500
- },
- "ocp_certs": [
- {
- "cert_cn": "CN:172.30.0.1, DNS:kubernetes, DNS:kubernetes.default, DNS:kubernetes.default.svc, DNS:kubernetes.default.svc.cluster.local, DNS:m01.example.com, DNS:openshift, DNS:openshift.default, DNS:openshift.default.svc, DNS:openshift.default.svc.cluster.local, DNS:172.30.0.1, DNS:192.168.124.148, IP Address:172.30.0.1, IP Address:192.168.124.148",
- "days_remaining": 715,
- "expiry": "2018-09-22 17:04:39",
- "health": "warning",
- "path": "/etc/origin/master/master.server.crt"
- },
- {
- "cert_cn": "CN:openshift-signer@1474563878",
- "days_remaining": 1810,
- "expiry": "2021-09-21 17:04:38",
- "health": "ok",
- "path": "/etc/origin/node/ca.crt"
- }
- ],
- "registry": [
- {
- "cert_cn": "CN:172.30.101.81, DNS:docker-registry-default.router.default.svc.cluster.local, DNS:docker-registry.default.svc.cluster.local, DNS:172.30.101.81, IP Address:172.30.101.81",
- "days_remaining": 728,
- "expiry": "2018-10-05 18:54:29",
- "health": "warning",
- "path": "/api/v1/namespaces/default/secrets/registry-certificates"
- }
- ],
- "router": [
- {
- "cert_cn": "CN:router.default.svc, DNS:router.default.svc, DNS:router.default.svc.cluster.local",
- "days_remaining": 715,
- "expiry": "2018-09-22 17:48:23",
- "health": "warning",
- "path": "/api/v1/namespaces/default/secrets/router-certs"
- }
- ]
+ "data": {
+ "m01.example.com": {
+ "etcd": [
+ {
+ "cert_cn": "CN:172.30.0.1, DNS:kubernetes, DNS:kubernetes.default, DNS:kubernetes.default.svc,...",
+ "days_remaining": 722,
+ "expiry": "2019-01-09 17:00:03",
+ "health": "warning",
+ "path": "/etc/origin/master/etcd.server.crt",
+ "serial": 7,
+ "serial_hex": "0x7"
+ }
+ ],
+ "kubeconfigs": [
+ {
+ "cert_cn": "O:system:nodes, CN:system:node:m01.example.com",
+ "days_remaining": 722,
+ "expiry": "2019-01-09 17:03:28",
+ "health": "warning",
+ "path": "/etc/origin/node/system:node:m01.example.com.kubeconfig",
+ "serial": 11,
+ "serial_hex": "0xb"
+ }
+ ],
+ "meta": {
+ "checked_at_time": "2017-01-17 10:36:25.230920",
+ "show_all": "True",
+ "warn_before_date": "2021-02-25 10:36:25.230920",
+ "warning_days": 1500
+ },
+ "ocp_certs": [
+ {
+ "cert_cn": "CN:172.30.0.1, DNS:kubernetes, DNS:kubernetes.default, DNS:kubernetes.default.svc,...",
+ "days_remaining": 722,
+ "expiry": "2019-01-09 17:00:02",
+ "health": "warning",
+ "path": "/etc/origin/master/master.server.crt",
+ "serial": 4,
+ "serial_hex": "0x4"
}
+ ],
+ "registry": [
+ {
+ "cert_cn": "CN:172.30.242.251, DNS:docker-registry-default.router.default.svc.cluster.local,...",
+ "days_remaining": 722,
+ "expiry": "2019-01-09 17:05:54",
+ "health": "warning",
+ "path": "/api/v1/namespaces/default/secrets/registry-certificates",
+ "serial": 13,
+ "serial_hex": "0xd"
+ }
+ ],
+ "router": [
+ {
+ "cert_cn": "CN:router.default.svc, DNS:router.default.svc, DNS:router.default.svc.cluster.local",
+ "days_remaining": 722,
+ "expiry": "2019-01-09 17:05:46",
+ "health": "warning",
+ "path": "/api/v1/namespaces/default/secrets/router-certs",
+ "serial": 5050662940948454653,
+ "serial_hex": "0x46178f2f6b765cfd"
+ }
+ ]
},
- "summary": {
- "warning": 6,
- "expired": 0
+ "n01.example.com": {
+ "etcd": [],
+ "kubeconfigs": [
+ {
+ "cert_cn": "O:system:nodes, CN:system:node:n01.example.com",
+ "days_remaining": 722,
+ "expiry": "2019-01-09 17:03:28",
+ "health": "warning",
+ "path": "/etc/origin/node/system:node:n01.example.com.kubeconfig",
+ "serial": 11,
+ "serial_hex": "0xb"
+ }
+ ],
+ "meta": {
+ "checked_at_time": "2017-01-17 10:36:25.217103",
+ "show_all": "True",
+ "warn_before_date": "2021-02-25 10:36:25.217103",
+ "warning_days": 1500
+ },
+ "ocp_certs": [
+ {
+ "cert_cn": "CN:192.168.124.11, DNS:n01.example.com, DNS:192.168.124.11, IP Address:192.168.124.11",
+ "days_remaining": 722,
+ "expiry": "2019-01-09 17:03:29",
+ "health": "warning",
+ "path": "/etc/origin/node/server.crt",
+ "serial": 12,
+ "serial_hex": "0xc"
+ }
+ ],
+ "registry": [],
+ "router": []
}
+ },
+ "summary": {
+ "expired": 0,
+ "ok": 3,
+ "total": 15,
+ "warning": 12
+ }
}
```
@@ -227,24 +368,17 @@ $ jq '.summary.warning,.summary.expired' /tmp/cert-expiry-report.json
```
-Requirements
-------------
-
+# Requirements
* None
-Dependencies
-------------
-
+# Dependencies
* None
-License
--------
-
+# License
Apache License, Version 2.0
-Author Information
-------------------
+# Author Information
Tim Bielawa (tbielawa@redhat.com)
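As a companion to the `jq` one-liner visible in the README hunk header above, here is a minimal Python sketch (not part of the patch) that reads the saved JSON report and inspects the new `ok` and `total` summary fields; it assumes the default report path used in that `jq` example.

```
#!/usr/bin/env python
# Minimal sketch (not from the patch): read the saved JSON report and
# exit non-zero if any certificate is expired or inside the warning
# window. Assumes the default report path from the jq example above.
import json
import sys

REPORT = "/tmp/cert-expiry-report.json"

with open(REPORT) as fp:
    summary = json.load(fp)["summary"]

print("total: {total}  ok: {ok}  warning: {warning}  expired: {expired}".format(**summary))
sys.exit(1 if (summary["warning"] or summary["expired"]) else 0)
```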
diff --git a/roles/openshift_certificate_expiry/examples/cert-expiry-report-html.png b/roles/openshift_certificate_expiry/examples/cert-expiry-report-html.png
new file mode 100644
index 000000000..799131659
--- /dev/null
+++ b/roles/openshift_certificate_expiry/examples/cert-expiry-report-html.png
Binary files differ
diff --git a/roles/openshift_certificate_expiry/examples/cert-expiry-report.html b/roles/openshift_certificate_expiry/examples/cert-expiry-report.html
new file mode 100644
index 000000000..db03a5060
--- /dev/null
+++ b/roles/openshift_certificate_expiry/examples/cert-expiry-report.html
@@ -0,0 +1,396 @@
+<!DOCTYPE html>
+<html>
+ <head>
+ <meta charset="UTF-8" />
+ <title>OCP Certificate Expiry Report</title>
+ <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css" />
+ <link href="https://fonts.googleapis.com/css?family=Source+Sans+Pro:300,400,700" rel="stylesheet" />
+ <style type="text/css">
+ body {
+ font-family: 'Source Sans Pro', sans-serif;
+ margin-left: 50px;
+ margin-right: 50px;
+ margin-bottom: 20px;
+ padding-top: 70px;
+ }
+ table {
+ border-collapse: collapse;
+ margin-bottom: 20px;
+ }
+ table, th, td {
+ border: 1px solid black;
+ }
+ th, td {
+ padding: 5px;
+ }
+ .cert-kind {
+ margin-top: 5px;
+ margin-bottom: 5px;
+ }
+ footer {
+ font-size: small;
+ text-align: center;
+ }
+ tr.odd {
+ background-color: #f2f2f2;
+ }
+ </style>
+ </head>
+ <body>
+ <nav class="navbar navbar-default navbar-fixed-top">
+ <div class="container-fluid">
+ <div class="navbar-header">
+ <a class="navbar-brand" href="#">OCP Certificate Expiry Report</a>
+ </div>
+ <div class="collapse navbar-collapse">
+ <p class="navbar-text navbar-right">
+ <button>
+ <a href="https://docs.openshift.com/container-platform/latest/install_config/redeploying_certificates.html"
+ target="_blank"
+ class="navbar-link">
+ <i class="glyphicon glyphicon-book"></i> Redeploying Certificates
+ </a>
+ </button>
+ <button>
+ <a href="https://github.com/openshift/openshift-ansible/tree/master/roles/openshift_certificate_expiry"
+ target="_blank"
+ class="navbar-link">
+ <i class="glyphicon glyphicon-book"></i> Expiry Role Documentation
+ </a>
+ </button>
+ </p>
+ </div>
+ </div>
+ </nav>
+
+ <h1>m01.example.com</h1>
+
+ <p>
+ Checked 12 total certificates. Expired/Warning/OK: 0/10/2. Warning window: 1500 days
+ </p>
+ <ul>
+ <li><b>Expirations checked at:</b> 2017-01-17 10:36:25.230920</li>
+ <li><b>Warn after date:</b> 2021-02-25 10:36:25.230920</li>
+ </ul>
+
+ <table border="1" width="100%">
+ <tr>
+ <th colspan="7" style="text-align:center"><h2 class="cert-kind">ocp_certs</h2></th>
+ </tr>
+
+ <tr>
+ <th>&nbsp;</th>
+ <th style="width:33%">Certificate Common/Alt Name(s)</th>
+        <th>Serial</th>
+ <th>Health</th>
+ <th>Days Remaining</th>
+ <th>Expiration Date</th>
+ <th>Path</th>
+ </tr>
+
+
+ <tr class="odd">
+ <td style="text-align:center"><i class="glyphicon glyphicon-alert"></i></td>
+ <td style="width:33%">CN:172.30.0.1, DNS:kubernetes, DNS:kubernetes.default, DNS:kubernetes.default.svc, DNS:kubernetes.default.svc.cluster.local, DNS:m01.example.com, DNS:openshift, DNS:openshift.default, DNS:openshift.default.svc, DNS:openshift.default.svc.cluster.local, DNS:172.30.0.1, DNS:192.168.124.148, IP Address:172.30.0.1, IP Address:192.168.124.148</td>
+ <td><code>int(4)/hex(0x4)</code></td>
+ <td>warning</td>
+ <td>722</td>
+ <td>2019-01-09 17:00:02</td>
+ <td>/etc/origin/master/master.server.crt</td>
+ </tr>
+
+ <tr class="even">
+ <td style="text-align:center"><i class="glyphicon glyphicon-alert"></i></td>
+ <td style="width:33%">CN:192.168.124.148, DNS:m01.example.com, DNS:192.168.124.148, IP Address:192.168.124.148</td>
+ <td><code>int(12)/hex(0xc)</code></td>
+ <td>warning</td>
+ <td>722</td>
+ <td>2019-01-09 17:03:29</td>
+ <td>/etc/origin/node/server.crt</td>
+ </tr>
+
+ <tr class="odd">
+ <td style="text-align:center"><i class="glyphicon glyphicon-ok"></i></td>
+ <td style="width:33%">CN:openshift-signer@1483981200</td>
+ <td><code>int(1)/hex(0x1)</code></td>
+ <td>ok</td>
+ <td>1817</td>
+ <td>2022-01-08 17:00:01</td>
+ <td>/etc/origin/master/ca.crt</td>
+ </tr>
+
+ <tr class="even">
+ <td style="text-align:center"><i class="glyphicon glyphicon-ok"></i></td>
+ <td style="width:33%">CN:openshift-signer@1483981200</td>
+ <td><code>int(1)/hex(0x1)</code></td>
+ <td>ok</td>
+ <td>1817</td>
+ <td>2022-01-08 17:00:01</td>
+ <td>/etc/origin/node/ca.crt</td>
+ </tr>
+ <tr>
+ <th colspan="7" style="text-align:center"><h2 class="cert-kind">etcd</h2></th>
+ </tr>
+
+ <tr>
+ <th>&nbsp;</th>
+ <th style="width:33%">Certificate Common/Alt Name(s)</th>
+        <th>Serial</th>
+ <th>Health</th>
+ <th>Days Remaining</th>
+ <th>Expiration Date</th>
+ <th>Path</th>
+ </tr>
+
+
+ <tr class="odd">
+ <td style="text-align:center"><i class="glyphicon glyphicon-alert"></i></td>
+ <td style="width:33%">CN:172.30.0.1, DNS:kubernetes, DNS:kubernetes.default, DNS:kubernetes.default.svc, DNS:kubernetes.default.svc.cluster.local, DNS:m01.example.com, DNS:openshift, DNS:openshift.default, DNS:openshift.default.svc, DNS:openshift.default.svc.cluster.local, DNS:172.30.0.1, DNS:192.168.124.148, IP Address:172.30.0.1, IP Address:192.168.124.148</td>
+ <td><code>int(7)/hex(0x7)</code></td>
+ <td>warning</td>
+ <td>722</td>
+ <td>2019-01-09 17:00:03</td>
+ <td>/etc/origin/master/etcd.server.crt</td>
+ </tr>
+ <tr>
+ <th colspan="7" style="text-align:center"><h2 class="cert-kind">kubeconfigs</h2></th>
+ </tr>
+
+ <tr>
+ <th>&nbsp;</th>
+ <th style="width:33%">Certificate Common/Alt Name(s)</th>
+        <th>Serial</th>
+ <th>Health</th>
+ <th>Days Remaining</th>
+ <th>Expiration Date</th>
+ <th>Path</th>
+ </tr>
+
+
+ <tr class="odd">
+ <td style="text-align:center"><i class="glyphicon glyphicon-alert"></i></td>
+ <td style="width:33%">O:system:nodes, CN:system:node:m01.example.com</td>
+ <td><code>int(11)/hex(0xb)</code></td>
+ <td>warning</td>
+ <td>722</td>
+ <td>2019-01-09 17:03:28</td>
+ <td>/etc/origin/node/system:node:m01.example.com.kubeconfig</td>
+ </tr>
+
+ <tr class="even">
+ <td style="text-align:center"><i class="glyphicon glyphicon-alert"></i></td>
+ <td style="width:33%">O:system:cluster-admins, CN:system:admin</td>
+ <td><code>int(8)/hex(0x8)</code></td>
+ <td>warning</td>
+ <td>722</td>
+ <td>2019-01-09 17:00:03</td>
+ <td>/etc/origin/master/admin.kubeconfig</td>
+ </tr>
+
+ <tr class="odd">
+ <td style="text-align:center"><i class="glyphicon glyphicon-alert"></i></td>
+ <td style="width:33%">O:system:masters, CN:system:openshift-master</td>
+ <td><code>int(3)/hex(0x3)</code></td>
+ <td>warning</td>
+ <td>722</td>
+ <td>2019-01-09 17:00:02</td>
+ <td>/etc/origin/master/openshift-master.kubeconfig</td>
+ </tr>
+
+ <tr class="even">
+ <td style="text-align:center"><i class="glyphicon glyphicon-alert"></i></td>
+ <td style="width:33%">O:system:routers, CN:system:openshift-router</td>
+ <td><code>int(9)/hex(0x9)</code></td>
+ <td>warning</td>
+ <td>722</td>
+ <td>2019-01-09 17:00:03</td>
+ <td>/etc/origin/master/openshift-router.kubeconfig</td>
+ </tr>
+
+ <tr class="odd">
+ <td style="text-align:center"><i class="glyphicon glyphicon-alert"></i></td>
+ <td style="width:33%">O:system:registries, CN:system:openshift-registry</td>
+ <td><code>int(10)/hex(0xa)</code></td>
+ <td>warning</td>
+ <td>722</td>
+ <td>2019-01-09 17:00:03</td>
+ <td>/etc/origin/master/openshift-registry.kubeconfig</td>
+ </tr>
+ <tr>
+ <th colspan="7" style="text-align:center"><h2 class="cert-kind">router</h2></th>
+ </tr>
+
+ <tr>
+ <th>&nbsp;</th>
+ <th style="width:33%">Certificate Common/Alt Name(s)</th>
+        <th>Serial</th>
+ <th>Health</th>
+ <th>Days Remaining</th>
+ <th>Expiration Date</th>
+ <th>Path</th>
+ </tr>
+
+
+ <tr class="odd">
+ <td style="text-align:center"><i class="glyphicon glyphicon-alert"></i></td>
+ <td style="width:33%">CN:router.default.svc, DNS:router.default.svc, DNS:router.default.svc.cluster.local</td>
+ <td><code>int(5050662940948454653)/hex(0x46178f2f6b765cfd)</code></td>
+ <td>warning</td>
+ <td>722</td>
+ <td>2019-01-09 17:05:46</td>
+ <td>/api/v1/namespaces/default/secrets/router-certs</td>
+ </tr>
+ <tr>
+ <th colspan="7" style="text-align:center"><h2 class="cert-kind">registry</h2></th>
+ </tr>
+
+ <tr>
+ <th>&nbsp;</th>
+ <th style="width:33%">Certificate Common/Alt Name(s)</th>
+        <th>Serial</th>
+ <th>Health</th>
+ <th>Days Remaining</th>
+ <th>Expiration Date</th>
+ <th>Path</th>
+ </tr>
+
+
+ <tr class="odd">
+ <td style="text-align:center"><i class="glyphicon glyphicon-alert"></i></td>
+ <td style="width:33%">CN:172.30.242.251, DNS:docker-registry-default.router.default.svc.cluster.local, DNS:docker-registry.default.svc.cluster.local, DNS:172.30.242.251, IP Address:172.30.242.251</td>
+ <td><code>int(13)/hex(0xd)</code></td>
+ <td>warning</td>
+ <td>722</td>
+ <td>2019-01-09 17:05:54</td>
+ <td>/api/v1/namespaces/default/secrets/registry-certificates</td>
+ </tr>
+ </table>
+ <hr />
+ <h1>n01.example.com</h1>
+
+ <p>
+ Checked 3 total certificates. Expired/Warning/OK: 0/2/1. Warning window: 1500 days
+ </p>
+ <ul>
+ <li><b>Expirations checked at:</b> 2017-01-17 10:36:25.217103</li>
+ <li><b>Warn after date:</b> 2021-02-25 10:36:25.217103</li>
+ </ul>
+
+ <table border="1" width="100%">
+ <tr>
+ <th colspan="7" style="text-align:center"><h2 class="cert-kind">ocp_certs</h2></th>
+ </tr>
+
+ <tr>
+ <th>&nbsp;</th>
+ <th style="width:33%">Certificate Common/Alt Name(s)</th>
+        <th>Serial</th>
+ <th>Health</th>
+ <th>Days Remaining</th>
+ <th>Expiration Date</th>
+ <th>Path</th>
+ </tr>
+
+
+ <tr class="odd">
+ <td style="text-align:center"><i class="glyphicon glyphicon-alert"></i></td>
+ <td style="width:33%">CN:192.168.124.11, DNS:n01.example.com, DNS:192.168.124.11, IP Address:192.168.124.11</td>
+ <td><code>int(12)/hex(0xc)</code></td>
+ <td>warning</td>
+ <td>722</td>
+ <td>2019-01-09 17:03:29</td>
+ <td>/etc/origin/node/server.crt</td>
+ </tr>
+
+ <tr class="even">
+ <td style="text-align:center"><i class="glyphicon glyphicon-ok"></i></td>
+ <td style="width:33%">CN:openshift-signer@1483981200</td>
+ <td><code>int(1)/hex(0x1)</code></td>
+ <td>ok</td>
+ <td>1817</td>
+ <td>2022-01-08 17:00:01</td>
+ <td>/etc/origin/node/ca.crt</td>
+ </tr>
+ <tr>
+ <th colspan="7" style="text-align:center"><h2 class="cert-kind">etcd</h2></th>
+ </tr>
+
+ <tr>
+ <th>&nbsp;</th>
+ <th style="width:33%">Certificate Common/Alt Name(s)</th>
+        <th>Serial</th>
+ <th>Health</th>
+ <th>Days Remaining</th>
+ <th>Expiration Date</th>
+ <th>Path</th>
+ </tr>
+
+ <tr>
+ <th colspan="7" style="text-align:center"><h2 class="cert-kind">kubeconfigs</h2></th>
+ </tr>
+
+ <tr>
+ <th>&nbsp;</th>
+ <th style="width:33%">Certificate Common/Alt Name(s)</th>
+        <th>Serial</th>
+ <th>Health</th>
+ <th>Days Remaining</th>
+ <th>Expiration Date</th>
+ <th>Path</th>
+ </tr>
+
+
+ <tr class="odd">
+ <td style="text-align:center"><i class="glyphicon glyphicon-alert"></i></td>
+ <td style="width:33%">O:system:nodes, CN:system:node:n01.example.com</td>
+ <td><code>int(11)/hex(0xb)</code></td>
+ <td>warning</td>
+ <td>722</td>
+ <td>2019-01-09 17:03:28</td>
+ <td>/etc/origin/node/system:node:n01.example.com.kubeconfig</td>
+ </tr>
+ <tr>
+ <th colspan="7" style="text-align:center"><h2 class="cert-kind">router</h2></th>
+ </tr>
+
+ <tr>
+ <th>&nbsp;</th>
+ <th style="width:33%">Certificate Common/Alt Name(s)</th>
+        <th>Serial</th>
+ <th>Health</th>
+ <th>Days Remaining</th>
+ <th>Expiration Date</th>
+ <th>Path</th>
+ </tr>
+
+ <tr>
+ <th colspan="7" style="text-align:center"><h2 class="cert-kind">registry</h2></th>
+ </tr>
+
+ <tr>
+ <th>&nbsp;</th>
+ <th style="width:33%">Certificate Common/Alt Name(s)</th>
+        <th>Serial</th>
+ <th>Health</th>
+ <th>Days Remaining</th>
+ <th>Expiration Date</th>
+ <th>Path</th>
+ </tr>
+
+ </table>
+ <hr />
+
+ <footer>
+ <p>
+ Expiration report generated by
+ the <a href="https://github.com/openshift/openshift-ansible"
+ target="_blank">openshift-ansible</a>
+ <a href="https://github.com/openshift/openshift-ansible/tree/master/roles/openshift_certificate_expiry"
+ target="_blank">certificate expiry</a> role.
+ </p>
+ <p>
+ Status icons from bootstrap/glyphicon
+ </p>
+ </footer>
+ </body>
+</html>
diff --git a/roles/openshift_certificate_expiry/examples/cert-expiry-report.json b/roles/openshift_certificate_expiry/examples/cert-expiry-report.json
new file mode 100644
index 000000000..8206e2842
--- /dev/null
+++ b/roles/openshift_certificate_expiry/examples/cert-expiry-report.json
@@ -0,0 +1,178 @@
+{
+ "data": {
+ "m01.example.com": {
+ "etcd": [
+ {
+ "cert_cn": "CN:172.30.0.1, DNS:kubernetes, DNS:kubernetes.default, DNS:kubernetes.default.svc, DNS:kubernetes.default.svc.cluster.local, DNS:m01.example.com, DNS:openshift, DNS:openshift.default, DNS:openshift.default.svc, DNS:openshift.default.svc.cluster.local, DNS:172.30.0.1, DNS:192.168.124.148, IP Address:172.30.0.1, IP Address:192.168.124.148",
+ "days_remaining": 722,
+ "expiry": "2019-01-09 17:00:03",
+ "health": "warning",
+ "path": "/etc/origin/master/etcd.server.crt",
+ "serial": 7,
+ "serial_hex": "0x7"
+ }
+ ],
+ "kubeconfigs": [
+ {
+ "cert_cn": "O:system:nodes, CN:system:node:m01.example.com",
+ "days_remaining": 722,
+ "expiry": "2019-01-09 17:03:28",
+ "health": "warning",
+ "path": "/etc/origin/node/system:node:m01.example.com.kubeconfig",
+ "serial": 11,
+ "serial_hex": "0xb"
+ },
+ {
+ "cert_cn": "O:system:cluster-admins, CN:system:admin",
+ "days_remaining": 722,
+ "expiry": "2019-01-09 17:00:03",
+ "health": "warning",
+ "path": "/etc/origin/master/admin.kubeconfig",
+ "serial": 8,
+ "serial_hex": "0x8"
+ },
+ {
+ "cert_cn": "O:system:masters, CN:system:openshift-master",
+ "days_remaining": 722,
+ "expiry": "2019-01-09 17:00:02",
+ "health": "warning",
+ "path": "/etc/origin/master/openshift-master.kubeconfig",
+ "serial": 3,
+ "serial_hex": "0x3"
+ },
+ {
+ "cert_cn": "O:system:routers, CN:system:openshift-router",
+ "days_remaining": 722,
+ "expiry": "2019-01-09 17:00:03",
+ "health": "warning",
+ "path": "/etc/origin/master/openshift-router.kubeconfig",
+ "serial": 9,
+ "serial_hex": "0x9"
+ },
+ {
+ "cert_cn": "O:system:registries, CN:system:openshift-registry",
+ "days_remaining": 722,
+ "expiry": "2019-01-09 17:00:03",
+ "health": "warning",
+ "path": "/etc/origin/master/openshift-registry.kubeconfig",
+ "serial": 10,
+ "serial_hex": "0xa"
+ }
+ ],
+ "meta": {
+ "checked_at_time": "2017-01-17 10:36:25.230920",
+ "show_all": "True",
+ "warn_before_date": "2021-02-25 10:36:25.230920",
+ "warning_days": 1500
+ },
+ "ocp_certs": [
+ {
+ "cert_cn": "CN:172.30.0.1, DNS:kubernetes, DNS:kubernetes.default, DNS:kubernetes.default.svc, DNS:kubernetes.default.svc.cluster.local, DNS:m01.example.com, DNS:openshift, DNS:openshift.default, DNS:openshift.default.svc, DNS:openshift.default.svc.cluster.local, DNS:172.30.0.1, DNS:192.168.124.148, IP Address:172.30.0.1, IP Address:192.168.124.148",
+ "days_remaining": 722,
+ "expiry": "2019-01-09 17:00:02",
+ "health": "warning",
+ "path": "/etc/origin/master/master.server.crt",
+ "serial": 4,
+ "serial_hex": "0x4"
+ },
+ {
+ "cert_cn": "CN:192.168.124.148, DNS:m01.example.com, DNS:192.168.124.148, IP Address:192.168.124.148",
+ "days_remaining": 722,
+ "expiry": "2019-01-09 17:03:29",
+ "health": "warning",
+ "path": "/etc/origin/node/server.crt",
+ "serial": 12,
+ "serial_hex": "0xc"
+ },
+ {
+ "cert_cn": "CN:openshift-signer@1483981200",
+ "days_remaining": 1817,
+ "expiry": "2022-01-08 17:00:01",
+ "health": "ok",
+ "path": "/etc/origin/master/ca.crt",
+ "serial": 1,
+ "serial_hex": "0x1"
+ },
+ {
+ "cert_cn": "CN:openshift-signer@1483981200",
+ "days_remaining": 1817,
+ "expiry": "2022-01-08 17:00:01",
+ "health": "ok",
+ "path": "/etc/origin/node/ca.crt",
+ "serial": 1,
+ "serial_hex": "0x1"
+ }
+ ],
+ "registry": [
+ {
+ "cert_cn": "CN:172.30.242.251, DNS:docker-registry-default.router.default.svc.cluster.local, DNS:docker-registry.default.svc.cluster.local, DNS:172.30.242.251, IP Address:172.30.242.251",
+ "days_remaining": 722,
+ "expiry": "2019-01-09 17:05:54",
+ "health": "warning",
+ "path": "/api/v1/namespaces/default/secrets/registry-certificates",
+ "serial": 13,
+ "serial_hex": "0xd"
+ }
+ ],
+ "router": [
+ {
+ "cert_cn": "CN:router.default.svc, DNS:router.default.svc, DNS:router.default.svc.cluster.local",
+ "days_remaining": 722,
+ "expiry": "2019-01-09 17:05:46",
+ "health": "warning",
+ "path": "/api/v1/namespaces/default/secrets/router-certs",
+ "serial": 5050662940948454653,
+ "serial_hex": "0x46178f2f6b765cfd"
+ }
+ ]
+ },
+ "n01.example.com": {
+ "etcd": [],
+ "kubeconfigs": [
+ {
+ "cert_cn": "O:system:nodes, CN:system:node:n01.example.com",
+ "days_remaining": 722,
+ "expiry": "2019-01-09 17:03:28",
+ "health": "warning",
+ "path": "/etc/origin/node/system:node:n01.example.com.kubeconfig",
+ "serial": 11,
+ "serial_hex": "0xb"
+ }
+ ],
+ "meta": {
+ "checked_at_time": "2017-01-17 10:36:25.217103",
+ "show_all": "True",
+ "warn_before_date": "2021-02-25 10:36:25.217103",
+ "warning_days": 1500
+ },
+ "ocp_certs": [
+ {
+ "cert_cn": "CN:192.168.124.11, DNS:n01.example.com, DNS:192.168.124.11, IP Address:192.168.124.11",
+ "days_remaining": 722,
+ "expiry": "2019-01-09 17:03:29",
+ "health": "warning",
+ "path": "/etc/origin/node/server.crt",
+ "serial": 12,
+ "serial_hex": "0xc"
+ },
+ {
+ "cert_cn": "CN:openshift-signer@1483981200",
+ "days_remaining": 1817,
+ "expiry": "2022-01-08 17:00:01",
+ "health": "ok",
+ "path": "/etc/origin/node/ca.crt",
+ "serial": 1,
+ "serial_hex": "0x1"
+ }
+ ],
+ "registry": [],
+ "router": []
+ }
+ },
+ "summary": {
+ "expired": 0,
+ "ok": 3,
+ "total": 15,
+ "warning": 12
+ }
+}
diff --git a/roles/openshift_certificate_expiry/examples/playbooks/default.yaml b/roles/openshift_certificate_expiry/examples/playbooks/default.yaml
new file mode 100644
index 000000000..630135cae
--- /dev/null
+++ b/roles/openshift_certificate_expiry/examples/playbooks/default.yaml
@@ -0,0 +1,10 @@
+---
+# Default behavior. You will need to run ansible with the -v option
+# to see the report results:
+
+- name: Check cert expiries
+ hosts: nodes:masters:etcd
+ become: yes
+ gather_facts: no
+ roles:
+ - role: openshift_certificate_expiry
diff --git a/roles/openshift_certificate_expiry/examples/playbooks/easy-mode.yaml b/roles/openshift_certificate_expiry/examples/playbooks/easy-mode.yaml
new file mode 100644
index 000000000..d0209426f
--- /dev/null
+++ b/roles/openshift_certificate_expiry/examples/playbooks/easy-mode.yaml
@@ -0,0 +1,21 @@
+---
+# This example playbook is great if you just want to try the
+# role out.
+#
+# This example enables HTML and JSON reports
+#
+# The warning window is set very large so you will almost always get results back
+#
+# All certificates (healthy or not) are included in the results
+
+- name: Check cert expiries
+ hosts: nodes:masters:etcd
+ become: yes
+ gather_facts: no
+ vars:
+ openshift_certificate_expiry_warning_days: 1500
+ openshift_certificate_expiry_save_json_results: yes
+ openshift_certificate_expiry_generate_html_report: yes
+ openshift_certificate_expiry_show_all: yes
+ roles:
+ - role: openshift_certificate_expiry
diff --git a/roles/openshift_certificate_expiry/examples/playbooks/html_and_json_default_paths.yaml b/roles/openshift_certificate_expiry/examples/playbooks/html_and_json_default_paths.yaml
new file mode 100644
index 000000000..d80cb6ff4
--- /dev/null
+++ b/roles/openshift_certificate_expiry/examples/playbooks/html_and_json_default_paths.yaml
@@ -0,0 +1,12 @@
+---
+# Generate HTML and JSON artifacts in their default paths:
+
+- name: Check cert expiries
+ hosts: nodes:masters:etcd
+ become: yes
+ gather_facts: no
+ vars:
+ openshift_certificate_expiry_generate_html_report: yes
+ openshift_certificate_expiry_save_json_results: yes
+ roles:
+ - role: openshift_certificate_expiry
diff --git a/roles/openshift_certificate_expiry/examples/playbooks/longer-warning-period-json-results.yaml b/roles/openshift_certificate_expiry/examples/playbooks/longer-warning-period-json-results.yaml
new file mode 100644
index 000000000..87a0f3be4
--- /dev/null
+++ b/roles/openshift_certificate_expiry/examples/playbooks/longer-warning-period-json-results.yaml
@@ -0,0 +1,13 @@
+---
+# Change the expiration warning window to 1500 days (good for testing
+# the module out) and save the results as a JSON file:
+
+- name: Check cert expiries
+ hosts: nodes:masters:etcd
+ become: yes
+ gather_facts: no
+ vars:
+ openshift_certificate_expiry_warning_days: 1500
+ openshift_certificate_expiry_save_json_results: yes
+ roles:
+ - role: openshift_certificate_expiry
diff --git a/roles/openshift_certificate_expiry/examples/playbooks/longer_warning_period.yaml b/roles/openshift_certificate_expiry/examples/playbooks/longer_warning_period.yaml
new file mode 100644
index 000000000..960457c4b
--- /dev/null
+++ b/roles/openshift_certificate_expiry/examples/playbooks/longer_warning_period.yaml
@@ -0,0 +1,12 @@
+---
+# Change the expiration warning window to 1500 days (good for testing
+# the module out):
+
+- name: Check cert expiries
+ hosts: nodes:masters:etcd
+ become: yes
+ gather_facts: no
+ vars:
+ openshift_certificate_expiry_warning_days: 1500
+ roles:
+ - role: openshift_certificate_expiry
diff --git a/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py b/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py
index bedd23fe8..5f102e960 100644
--- a/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py
+++ b/roles/openshift_certificate_expiry/filter_plugins/oo_cert_expiry.py
@@ -51,9 +51,13 @@ Example playbook usage:
total_warnings = sum([hostvars[h]['check_results']['summary']['warning'] for h in play_hosts])
total_expired = sum([hostvars[h]['check_results']['summary']['expired'] for h in play_hosts])
+ total_ok = sum([hostvars[h]['check_results']['summary']['ok'] for h in play_hosts])
+ total_total = sum([hostvars[h]['check_results']['summary']['total'] for h in play_hosts])
json_result['summary']['warning'] = total_warnings
json_result['summary']['expired'] = total_expired
+ json_result['summary']['ok'] = total_ok
+ json_result['summary']['total'] = total_total
return json_result
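The filter-plugin hunk above only adds two more counters to the cluster-wide rollup. A self-contained sketch of that aggregation, using the per-host totals from the example report (the `hostvars` layout is simplified for illustration):

```
# Standalone illustration of the summary rollup performed by the filter
# plugin above: sum each host's summary counters into one cluster-wide
# summary. The hostvars structure is simplified for the example.
play_hosts = ["m01.example.com", "n01.example.com"]
hostvars = {
    "m01.example.com": {"check_results": {"summary": {"warning": 10, "expired": 0, "ok": 2, "total": 12}}},
    "n01.example.com": {"check_results": {"summary": {"warning": 2, "expired": 0, "ok": 1, "total": 3}}},
}

summary = {}
for key in ("warning", "expired", "ok", "total"):
    summary[key] = sum(hostvars[h]["check_results"]["summary"][key] for h in play_hosts)

print(summary)  # warning=12, expired=0, ok=3, total=15
```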
diff --git a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py b/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
index e838eb2d4..85671b164 100644
--- a/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
+++ b/roles/openshift_certificate_expiry/library/openshift_cert_expiry.py
@@ -4,17 +4,13 @@
"""For details on this module see DOCUMENTATION (below)"""
-# router/registry cert grabbing
-import subprocess
-# etcd config file
-import ConfigParser
-# Expiration parsing
import datetime
-# File path stuff
import os
-# Config file parsing
+import subprocess
+
+from six.moves import configparser
+
import yaml
-# Certificate loading
import OpenSSL.crypto
DOCUMENTATION = '''
@@ -126,6 +122,8 @@ A 3-tuple of the form: (certificate_common_name, certificate_expiry_date, certif
cert_loaded = OpenSSL.crypto.load_certificate(
OpenSSL.crypto.FILETYPE_PEM, _cert_string)
+ cert_serial = cert_loaded.get_serial_number()
+
######################################################################
# Read all possible names from the cert
cert_subjects = []
@@ -182,7 +180,7 @@ A 3-tuple of the form: (certificate_common_name, certificate_expiry_date, certif
time_remaining = cert_expiry_date - now
- return (cert_subject, cert_expiry_date, time_remaining)
+ return (cert_subject, cert_expiry_date, time_remaining, cert_serial)
def classify_cert(cert_meta, now, time_remaining, expire_window, cert_list):
@@ -214,6 +212,7 @@ Return:
cert_meta['health'] = 'ok'
cert_meta['expiry'] = expiry_str
+ cert_meta['serial_hex'] = hex(int(cert_meta['serial']))
cert_list.append(cert_meta)
return cert_list
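The hunks above thread a certificate serial number through `load_and_handle_cert()` and into each result dict, deriving `serial_hex` via `hex(int(...))`. A minimal pyOpenSSL sketch of that derivation (the certificate path is only an example taken from the report):

```
# Sketch of the serial/serial_hex pair added by this patch, using the
# same pyOpenSSL calls as the module. The path is an example only.
import OpenSSL.crypto

with open("/etc/origin/master/master.server.crt") as fp:
    cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, fp.read())

serial = cert.get_serial_number()   # e.g. 4 in the example report
serial_hex = hex(int(serial))       # e.g. '0x4'
print(serial, serial_hex)
```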
@@ -260,7 +259,10 @@ Return:
# This is our module MAIN function after all, so there's bound to be a
# lot of code bundled up into one block
#
-# pylint: disable=too-many-locals,too-many-locals,too-many-statements,too-many-branches
+# Reason: These checks are disabled because the issue was introduced
+# during a period where the pylint checks weren't enabled for this file
+# Status: temporarily disabled pending future refactoring
+# pylint: disable=too-many-locals,too-many-statements,too-many-branches
def main():
"""This module examines certificates (in various forms) which compose
an OpenShift Container Platform cluster
@@ -371,10 +373,13 @@ an OpenShift Container Platform cluster
######################################################################
# Load the certificate and the CA, parse their expiration dates into
# datetime objects so we can manipulate them later
- for _, v in cert_meta.iteritems():
+ for _, v in cert_meta.items():
with open(v, 'r') as fp:
cert = fp.read()
- cert_subject, cert_expiry_date, time_remaining = load_and_handle_cert(cert, now)
+ (cert_subject,
+ cert_expiry_date,
+ time_remaining,
+ cert_serial) = load_and_handle_cert(cert, now)
expire_check_result = {
'cert_cn': cert_subject,
@@ -382,6 +387,7 @@ an OpenShift Container Platform cluster
'expiry': cert_expiry_date,
'days_remaining': time_remaining.days,
'health': None,
+ 'serial': cert_serial
}
classify_cert(expire_check_result, now, time_remaining, expire_window, ocp_certs)
@@ -421,7 +427,8 @@ an OpenShift Container Platform cluster
c = cfg['users'][0]['user']['client-certificate-data']
(cert_subject,
cert_expiry_date,
- time_remaining) = load_and_handle_cert(c, now, base64decode=True)
+ time_remaining,
+ cert_serial) = load_and_handle_cert(c, now, base64decode=True)
expire_check_result = {
'cert_cn': cert_subject,
@@ -429,6 +436,7 @@ an OpenShift Container Platform cluster
'expiry': cert_expiry_date,
'days_remaining': time_remaining.days,
'health': None,
+ 'serial': cert_serial
}
classify_cert(expire_check_result, now, time_remaining, expire_window, kubeconfigs)
@@ -449,7 +457,8 @@ an OpenShift Container Platform cluster
c = cfg['users'][0]['user']['client-certificate-data']
(cert_subject,
cert_expiry_date,
- time_remaining) = load_and_handle_cert(c, now, base64decode=True)
+ time_remaining,
+ cert_serial) = load_and_handle_cert(c, now, base64decode=True)
expire_check_result = {
'cert_cn': cert_subject,
@@ -457,6 +466,7 @@ an OpenShift Container Platform cluster
'expiry': cert_expiry_date,
'days_remaining': time_remaining.days,
'health': None,
+ 'serial': cert_serial
}
classify_cert(expire_check_result, now, time_remaining, expire_window, kubeconfigs)
@@ -467,7 +477,11 @@ an OpenShift Container Platform cluster
######################################################################
# Check etcd certs
+ #
+ # Two things to check: 'external' etcd, and embedded etcd.
######################################################################
+ # FIRST: The 'external' etcd
+ #
# Some values may be duplicated, make this a set for now so we
# unique them all
etcd_certs_to_check = set([])
@@ -475,13 +489,17 @@ an OpenShift Container Platform cluster
etcd_cert_params.append('dne')
try:
with open('/etc/etcd/etcd.conf', 'r') as fp:
- etcd_config = ConfigParser.ConfigParser()
+ etcd_config = configparser.ConfigParser()
+ # Reason: This check is disabled because the issue was introduced
+ # during a period where the pylint checks weren't enabled for this file
+ # Status: temporarily disabled pending future refactoring
+ # pylint: disable=deprecated-method
etcd_config.readfp(FakeSecHead(fp))
for param in etcd_cert_params:
try:
etcd_certs_to_check.add(etcd_config.get('ETCD', param))
- except ConfigParser.NoOptionError:
+ except configparser.NoOptionError:
# That parameter does not exist, oh well...
pass
except IOError:
@@ -493,7 +511,8 @@ an OpenShift Container Platform cluster
c = fp.read()
(cert_subject,
cert_expiry_date,
- time_remaining) = load_and_handle_cert(c, now)
+ time_remaining,
+ cert_serial) = load_and_handle_cert(c, now)
expire_check_result = {
'cert_cn': cert_subject,
@@ -501,11 +520,51 @@ an OpenShift Container Platform cluster
'expiry': cert_expiry_date,
'days_remaining': time_remaining.days,
'health': None,
+ 'serial': cert_serial
}
classify_cert(expire_check_result, now, time_remaining, expire_window, etcd_certs)
######################################################################
+ # Now the embedded etcd
+ ######################################################################
+ try:
+ with open('/etc/origin/master/master-config.yaml', 'r') as fp:
+ cfg = yaml.load(fp)
+ except IOError:
+ # Not present
+ pass
+ else:
+ if cfg.get('etcdConfig', {}).get('servingInfo', {}).get('certFile', None) is not None:
+ # This is embedded
+ etcd_crt_name = cfg['etcdConfig']['servingInfo']['certFile']
+ else:
+ # Not embedded
+ etcd_crt_name = None
+
+ if etcd_crt_name is not None:
+ # etcd_crt_name is relative to the location of the
+ # master-config.yaml file
+ cfg_path = os.path.dirname(fp.name)
+ etcd_cert = os.path.join(cfg_path, etcd_crt_name)
+ with open(etcd_cert, 'r') as etcd_fp:
+ (cert_subject,
+ cert_expiry_date,
+ time_remaining,
+ cert_serial) = load_and_handle_cert(etcd_fp.read(), now)
+
+ expire_check_result = {
+ 'cert_cn': cert_subject,
+ 'path': etcd_fp.name,
+ 'expiry': cert_expiry_date,
+ 'days_remaining': time_remaining.days,
+ 'health': None,
+ 'serial': cert_serial
+ }
+
+ classify_cert(expire_check_result, now, time_remaining, expire_window, etcd_certs)
+
+ ######################################################################
# /Check etcd certs
######################################################################
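In the new embedded-etcd branch above, `certFile` is resolved relative to the directory of `master-config.yaml`. A tiny standalone sketch of that path handling (the certFile value here is hypothetical):

```
# Illustration of the relative-path resolution in the embedded etcd
# branch: certFile is joined onto the master-config.yaml directory.
import os

config_path = "/etc/origin/master/master-config.yaml"
etcd_crt_name = "etcd.server.crt"  # hypothetical etcdConfig.servingInfo.certFile value

etcd_cert = os.path.join(os.path.dirname(config_path), etcd_crt_name)
print(etcd_cert)  # /etc/origin/master/etcd.server.crt
```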
@@ -523,7 +582,7 @@ an OpenShift Container Platform cluster
######################################################################
# First the router certs
try:
- router_secrets_raw = subprocess.Popen('oc get secret router-certs -o yaml'.split(),
+ router_secrets_raw = subprocess.Popen('oc get -n default secret router-certs -o yaml'.split(),
stdout=subprocess.PIPE)
router_ds = yaml.load(router_secrets_raw.communicate()[0])
router_c = router_ds['data']['tls.crt']
@@ -537,7 +596,8 @@ an OpenShift Container Platform cluster
else:
(cert_subject,
cert_expiry_date,
- time_remaining) = load_and_handle_cert(router_c, now, base64decode=True)
+ time_remaining,
+ cert_serial) = load_and_handle_cert(router_c, now, base64decode=True)
expire_check_result = {
'cert_cn': cert_subject,
@@ -545,6 +605,7 @@ an OpenShift Container Platform cluster
'expiry': cert_expiry_date,
'days_remaining': time_remaining.days,
'health': None,
+ 'serial': cert_serial
}
classify_cert(expire_check_result, now, time_remaining, expire_window, router_certs)
@@ -552,7 +613,7 @@ an OpenShift Container Platform cluster
######################################################################
# Now for registry
try:
- registry_secrets_raw = subprocess.Popen('oc get secret registry-certificates -o yaml'.split(),
+ registry_secrets_raw = subprocess.Popen('oc get -n default secret registry-certificates -o yaml'.split(),
stdout=subprocess.PIPE)
registry_ds = yaml.load(registry_secrets_raw.communicate()[0])
registry_c = registry_ds['data']['registry.crt']
@@ -566,7 +627,8 @@ an OpenShift Container Platform cluster
else:
(cert_subject,
cert_expiry_date,
- time_remaining) = load_and_handle_cert(registry_c, now, base64decode=True)
+ time_remaining,
+ cert_serial) = load_and_handle_cert(registry_c, now, base64decode=True)
expire_check_result = {
'cert_cn': cert_subject,
@@ -574,6 +636,7 @@ an OpenShift Container Platform cluster
'expiry': cert_expiry_date,
'days_remaining': time_remaining.days,
'health': None,
+ 'serial': cert_serial
}
classify_cert(expire_check_result, now, time_remaining, expire_window, registry_certs)
@@ -613,9 +676,13 @@ an OpenShift Container Platform cluster
# will be at the front of the list and certificates which will
# expire later are at the end. Router and registry certs should be
# limited to just 1 result, so don't bother sorting those.
- check_results['ocp_certs'] = sorted(check_results['ocp_certs'], cmp=lambda x, y: cmp(x['days_remaining'], y['days_remaining']))
- check_results['kubeconfigs'] = sorted(check_results['kubeconfigs'], cmp=lambda x, y: cmp(x['days_remaining'], y['days_remaining']))
- check_results['etcd'] = sorted(check_results['etcd'], cmp=lambda x, y: cmp(x['days_remaining'], y['days_remaining']))
+ def cert_key(item):
+ ''' return the days_remaining key '''
+ return item['days_remaining']
+
+ check_results['ocp_certs'] = sorted(check_results['ocp_certs'], key=cert_key)
+ check_results['kubeconfigs'] = sorted(check_results['kubeconfigs'], key=cert_key)
+ check_results['etcd'] = sorted(check_results['etcd'], key=cert_key)
# This module will never change anything, but we might want to
# change the return code parameter if there is some catastrophic
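The final hunk replaces the Python 2-only `cmp=` sorting with a key function so results sort the same way on Python 2 and 3. A two-entry illustration with `days_remaining` values borrowed from the example report:

```
# Minimal illustration of the cmp= -> key= change: certificates that
# expire soonest sort to the front.
certs = [
    {"path": "/etc/origin/node/ca.crt", "days_remaining": 1817},
    {"path": "/etc/origin/node/server.crt", "days_remaining": 722},
]

def cert_key(item):
    """Return the days_remaining key."""
    return item["days_remaining"]

for cert in sorted(certs, key=cert_key):
    print(cert["path"], cert["days_remaining"])
# /etc/origin/node/server.crt 722
# /etc/origin/node/ca.crt 1817
```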
diff --git a/roles/openshift_certificate_expiry/templates/cert-expiry-table.html.j2 b/roles/openshift_certificate_expiry/templates/cert-expiry-table.html.j2
index b05110336..1d4bb24e9 100644
--- a/roles/openshift_certificate_expiry/templates/cert-expiry-table.html.j2
+++ b/roles/openshift_certificate_expiry/templates/cert-expiry-table.html.j2
@@ -45,11 +45,20 @@
</div>
<div class="collapse navbar-collapse">
<p class="navbar-text navbar-right">
- <a href="https://docs.openshift.com/container-platform/latest/install_config/redeploying_certificates.html"
- target="_blank"
- class="navbar-link">
- <i class="glyphicon glyphicon-book"></i> Redeploying Certificates
- </a>
+ <button>
+ <a href="https://docs.openshift.com/container-platform/latest/install_config/redeploying_certificates.html"
+ target="_blank"
+ class="navbar-link">
+ <i class="glyphicon glyphicon-book"></i> Redeploying Certificates
+ </a>
+ </button>
+ <button>
+ <a href="https://github.com/openshift/openshift-ansible/tree/master/roles/openshift_certificate_expiry"
+ target="_blank"
+ class="navbar-link">
+ <i class="glyphicon glyphicon-book"></i> Expiry Role Documentation
+ </a>
+ </button>
</p>
</div>
</div>
@@ -71,12 +80,13 @@
{# These are hard-coded right now, but should be grabbed dynamically from the registered results #}
{%- for kind in ['ocp_certs', 'etcd', 'kubeconfigs', 'router', 'registry'] -%}
<tr>
- <th colspan="6" style="text-align:center"><h2 class="cert-kind">{{ kind }}</h2></th>
+ <th colspan="7" style="text-align:center"><h2 class="cert-kind">{{ kind }}</h2></th>
</tr>
<tr>
<th>&nbsp;</th>
<th style="width:33%">Certificate Common/Alt Name(s)</th>
+      <th>Serial</th>
<th>Health</th>
<th>Days Remaining</th>
<th>Expiration Date</th>
@@ -98,6 +108,7 @@
<tr class="{{ loop.cycle('odd', 'even') }}">
<td style="text-align:center"><i class="{{ health_icon }}"></i></td>
<td style="width:33%">{{ v.cert_cn }}</td>
+ <td><code>int({{ v.serial }})/hex({{ v.serial_hex }})</code></td>
<td>{{ v.health }}</td>
<td>{{ v.days_remaining }}</td>
<td>{{ v.expiry }}</td>
@@ -114,7 +125,11 @@
<footer>
<p>
- Expiration report generated by <a href="https://github.com/openshift/openshift-ansible" target="_blank">openshift-ansible</a>
+ Expiration report generated by
+ the <a href="https://github.com/openshift/openshift-ansible"
+ target="_blank">openshift-ansible</a>
+ <a href="https://github.com/openshift/openshift-ansible/tree/master/roles/openshift_certificate_expiry"
+ target="_blank">certificate expiry</a> role.
</p>
<p>
Status icons from bootstrap/glyphicon
diff --git a/roles/openshift_docker_facts/tasks/main.yml b/roles/openshift_docker_facts/tasks/main.yml
index 613c237a3..049ceffe0 100644
--- a/roles/openshift_docker_facts/tasks/main.yml
+++ b/roles/openshift_docker_facts/tasks/main.yml
@@ -9,6 +9,7 @@
additional_registries: "{{ openshift_docker_additional_registries | default(None) }}"
blocked_registries: "{{ openshift_docker_blocked_registries | default(None) }}"
insecure_registries: "{{ openshift_docker_insecure_registries | default(None) }}"
+ selinux_enabled: "{{ openshift_docker_selinux_enabled | default(None) }}"
log_driver: "{{ openshift_docker_log_driver | default(None) }}"
log_options: "{{ openshift_docker_log_options | default(None) }}"
options: "{{ openshift_docker_options | default(None) }}"
@@ -23,6 +24,7 @@
| default(omit) }}"
docker_insecure_registries: "{{ openshift.docker.insecure_registries
| default(omit) }}"
+ docker_selinux_enabled: "{{ openshift.docker.selinux_enabled | default(omit) }}"
docker_log_driver: "{{ openshift.docker.log_driver | default(omit) }}"
docker_log_options: "{{ openshift.docker.log_options | default(omit) }}"
docker_push_dockerhub: "{{ openshift.docker.disable_push_dockerhub
diff --git a/roles/openshift_examples/examples-sync.sh b/roles/openshift_examples/examples-sync.sh
index b139cc599..a501ad938 100755
--- a/roles/openshift_examples/examples-sync.sh
+++ b/roles/openshift_examples/examples-sync.sh
@@ -6,7 +6,9 @@
# This script should be run from openshift-ansible/roles/openshift_examples
XPAAS_VERSION=ose-v1.3.5
-ORIGIN_VERSION=${1:-v1.4}
+ORIGIN_VERSION=${1:-v1.5}
+RHAMP_TAG=1.0.0.GA
+RHAMP_TEMPLATE=https://raw.githubusercontent.com/3scale/rhamp-openshift-templates/${RHAMP_TAG}/apicast-gateway/apicast-gateway-template.yml
EXAMPLES_BASE=$(pwd)/files/examples/${ORIGIN_VERSION}
find ${EXAMPLES_BASE} -name '*.json' -delete
TEMP=`mktemp -d`
@@ -22,12 +24,13 @@ cp origin-master/examples/jenkins/jenkins-*template.json ${EXAMPLES_BASE}/quicks
cp origin-master/examples/image-streams/* ${EXAMPLES_BASE}/image-streams/
mv application-templates-${XPAAS_VERSION}/jboss-image-streams.json ${EXAMPLES_BASE}/xpaas-streams/
find application-templates-${XPAAS_VERSION}/ -name '*.json' ! -wholename '*secret*' ! -wholename '*demo*' -exec mv {} ${EXAMPLES_BASE}/xpaas-templates/ \;
-wget https://raw.githubusercontent.com/jboss-fuse/application-templates/GA/fis-image-streams.json -O ${EXAMPLES_BASE}/xpaas-streams/fis-image-streams.json
+wget https://raw.githubusercontent.com/jboss-fuse/application-templates/GA/fis-image-streams.json -O ${EXAMPLES_BASE}/xpaas-streams/fis-image-streams.json
wget https://raw.githubusercontent.com/redhat-developer/s2i-dotnetcore/master/dotnet_imagestreams.json -O ${EXAMPLES_BASE}/image-streams/dotnet_imagestreams.json
-wget https://raw.githubusercontent.com/openshift/origin-metrics/master/metrics.yaml -O ${EXAMPLES_BASE}/infrastructure-templates/origin/metrics-deployer.yaml
-wget https://raw.githubusercontent.com/openshift/origin-metrics/enterprise/metrics.yaml -O ${EXAMPLES_BASE}/infrastructure-templates/enterprise/metrics-deployer.yaml
-wget https://raw.githubusercontent.com/openshift/origin-aggregated-logging/master/deployer/deployer.yaml -O ${EXAMPLES_BASE}/infrastructure-templates/origin/logging-deployer.yaml
-wget https://raw.githubusercontent.com/openshift/origin-aggregated-logging/enterprise/deployment/deployer.yaml -O ${EXAMPLES_BASE}/infrastructure-templates/enterprise/logging-deployer.yaml
+wget https://raw.githubusercontent.com/openshift/origin-metrics/master/metrics.yaml -O ../openshift_hosted_templates/files/${ORIGIN_VERSION}/origin/metrics-deployer.yaml
+wget https://raw.githubusercontent.com/openshift/origin-metrics/enterprise/metrics.yaml -O ../openshift_hosted_templates/files/${ORIGIN_VERSION}/enterprise/metrics-deployer.yaml
+wget https://raw.githubusercontent.com/openshift/origin-aggregated-logging/master/deployer/deployer.yaml -O ../openshift_hosted_templates/files/${ORIGIN_VERSION}/origin/logging-deployer.yaml
+wget https://raw.githubusercontent.com/openshift/origin-aggregated-logging/enterprise/deployment/deployer.yaml -O ../openshift_hosted_templates/files/${ORIGIN_VERSION}/enterprise/logging-deployer.yaml
+wget ${RHAMP_TEMPLATE} -O ${EXAMPLES_BASE}/quickstart-templates/apicast-gateway-template.yml
popd
git diff files/examples
diff --git a/roles/openshift_examples/files/examples/latest b/roles/openshift_examples/files/examples/latest
index 64c411b81..59b6ef75f 120000
--- a/roles/openshift_examples/files/examples/latest
+++ b/roles/openshift_examples/files/examples/latest
@@ -1 +1 @@
-v1.4
\ No newline at end of file
+v1.5
\ No newline at end of file
diff --git a/roles/openshift_examples/files/examples/v1.3/db-templates/mariadb-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.3/db-templates/mariadb-ephemeral-template.json
index 8e43bfbc3..cfbfc3e20 100644
--- a/roles/openshift_examples/files/examples/v1.3/db-templates/mariadb-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v1.3/db-templates/mariadb-ephemeral-template.json
@@ -16,6 +16,18 @@
},
"objects": [
{
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "stringData" : {
+ "database-user" : "${MYSQL_USER}",
+ "database-password" : "${MYSQL_PASSWORD}",
+ "database-root-password" : "${MYSQL_ROOT_PASSWORD}"
+ }
+ },
+ {
"kind": "Service",
"apiVersion": "v1",
"metadata": {
@@ -100,11 +112,30 @@
"env": [
{
"name": "MYSQL_USER",
- "value": "${MYSQL_USER}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-user"
+ }
+ }
},
{
"name": "MYSQL_PASSWORD",
- "value": "${MYSQL_PASSWORD}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-password"
+ }
+ }
+ },
+ {
+ "name": "MYSQL_ROOT_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-root-password"
+ }
+ }
},
{
"name": "MYSQL_DATABASE",
@@ -176,6 +207,14 @@
"required": true
},
{
+ "name": "MYSQL_ROOT_PASSWORD",
+ "displayName": "MariaDB root Password",
+ "description": "Password for the MariaDB root user.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}",
+ "required": true
+ },
+ {
"name": "MYSQL_DATABASE",
"displayName": "MariaDB Database Name",
"description": "Name of the MariaDB database accessed.",
diff --git a/roles/openshift_examples/files/examples/v1.3/db-templates/mariadb-persistent-template.json b/roles/openshift_examples/files/examples/v1.3/db-templates/mariadb-persistent-template.json
index bc85277a9..e933eecf0 100644
--- a/roles/openshift_examples/files/examples/v1.3/db-templates/mariadb-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.3/db-templates/mariadb-persistent-template.json
@@ -16,6 +16,18 @@
},
"objects": [
{
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "stringData" : {
+ "database-user" : "${MYSQL_USER}",
+ "database-password" : "${MYSQL_PASSWORD}",
+ "database-root-password" : "${MYSQL_ROOT_PASSWORD}"
+ }
+ },
+ {
"kind": "Service",
"apiVersion": "v1",
"metadata": {
@@ -117,11 +129,30 @@
"env": [
{
"name": "MYSQL_USER",
- "value": "${MYSQL_USER}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-user"
+ }
+ }
},
{
"name": "MYSQL_PASSWORD",
- "value": "${MYSQL_PASSWORD}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-password"
+ }
+ }
+ },
+ {
+ "name": "MYSQL_ROOT_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-root-password"
+ }
+ }
},
{
"name": "MYSQL_DATABASE",
@@ -193,6 +224,14 @@
"required": true
},
{
+ "name": "MYSQL_ROOT_PASSWORD",
+ "displayName": "MariaDB root Password",
+ "description": "Password for the MariaDB root user.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}",
+ "required": true
+ },
+ {
"name": "MYSQL_DATABASE",
"displayName": "MariaDB Database Name",
"description": "Name of the MariaDB database accessed.",
diff --git a/roles/openshift_examples/files/examples/v1.3/db-templates/mongodb-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.3/db-templates/mongodb-ephemeral-template.json
index 605601ef2..8b8fcb58b 100644
--- a/roles/openshift_examples/files/examples/v1.3/db-templates/mongodb-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v1.3/db-templates/mongodb-ephemeral-template.json
@@ -17,6 +17,18 @@
},
"objects": [
{
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "stringData" : {
+ "database-user" : "${MONGODB_USER}",
+ "database-password" : "${MONGODB_PASSWORD}",
+ "database-admin-password" : "${MONGODB_ADMIN_PASSWORD}"
+ }
+ },
+ {
"kind": "Service",
"apiVersion": "v1",
"metadata": {
@@ -113,19 +125,34 @@
"env": [
{
"name": "MONGODB_USER",
- "value": "${MONGODB_USER}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-user"
+ }
+ }
},
{
"name": "MONGODB_PASSWORD",
- "value": "${MONGODB_PASSWORD}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-password"
+ }
+ }
},
{
- "name": "MONGODB_DATABASE",
- "value": "${MONGODB_DATABASE}"
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-admin-password"
+ }
+ }
},
{
- "name": "MONGODB_ADMIN_PASSWORD",
- "value": "${MONGODB_ADMIN_PASSWORD}"
+ "name": "MONGODB_DATABASE",
+ "value": "${MONGODB_DATABASE}"
}
],
"resources": {
diff --git a/roles/openshift_examples/files/examples/v1.3/db-templates/mongodb-persistent-template.json b/roles/openshift_examples/files/examples/v1.3/db-templates/mongodb-persistent-template.json
index d2a0d01f0..72d3a8556 100644
--- a/roles/openshift_examples/files/examples/v1.3/db-templates/mongodb-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.3/db-templates/mongodb-persistent-template.json
@@ -17,6 +17,18 @@
},
"objects": [
{
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "stringData" : {
+ "database-user" : "${MONGODB_USER}",
+ "database-password" : "${MONGODB_PASSWORD}",
+ "database-admin-password" : "${MONGODB_ADMIN_PASSWORD}"
+ }
+ },
+ {
"kind": "Service",
"apiVersion": "v1",
"metadata": {
@@ -130,19 +142,34 @@
"env": [
{
"name": "MONGODB_USER",
- "value": "${MONGODB_USER}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-user"
+ }
+ }
},
{
"name": "MONGODB_PASSWORD",
- "value": "${MONGODB_PASSWORD}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-password"
+ }
+ }
},
{
- "name": "MONGODB_DATABASE",
- "value": "${MONGODB_DATABASE}"
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-admin-password"
+ }
+ }
},
{
- "name": "MONGODB_ADMIN_PASSWORD",
- "value": "${MONGODB_ADMIN_PASSWORD}"
+ "name": "MONGODB_DATABASE",
+ "value": "${MONGODB_DATABASE}"
}
],
"resources": {
diff --git a/roles/openshift_examples/files/examples/v1.3/db-templates/mysql-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.3/db-templates/mysql-ephemeral-template.json
index 0cea42f8b..34dd2ed78 100644
--- a/roles/openshift_examples/files/examples/v1.3/db-templates/mysql-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v1.3/db-templates/mysql-ephemeral-template.json
@@ -5,16 +5,28 @@
"name": "mysql-ephemeral",
"annotations": {
"openshift.io/display-name": "MySQL (Ephemeral)",
- "description": "MySQL database service, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.6/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing",
+ "description": "MySQL database service, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.7/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing",
"iconClass": "icon-mysql-database",
"tags": "database,mysql"
}
},
- "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MYSQL_USER}\n Password: ${MYSQL_PASSWORD}\n Database Name: ${MYSQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:3306/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.6/README.md.",
+ "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MYSQL_USER}\n Password: ${MYSQL_PASSWORD}\n Database Name: ${MYSQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:3306/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.7/README.md.",
"labels": {
"template": "mysql-ephemeral-template"
},
"objects": [
+ {
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "stringData" : {
+ "database-user" : "${MYSQL_USER}",
+ "database-password" : "${MYSQL_PASSWORD}",
+ "database-root-password" : "${MYSQL_ROOT_PASSWORD}"
+ }
+ },
{
"kind": "Service",
"apiVersion": "v1",
@@ -113,11 +125,30 @@
"env": [
{
"name": "MYSQL_USER",
- "value": "${MYSQL_USER}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-user"
+ }
+ }
},
{
"name": "MYSQL_PASSWORD",
- "value": "${MYSQL_PASSWORD}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-password"
+ }
+ }
+ },
+ {
+ "name": "MYSQL_ROOT_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-root-password"
+ }
+ }
},
{
"name": "MYSQL_DATABASE",
@@ -197,6 +228,14 @@
"required": true
},
{
+ "name": "MYSQL_ROOT_PASSWORD",
+ "displayName": "MySQL root user Password",
+ "description": "Password for the MySQL root user.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}",
+ "required": true
+ },
+ {
"name": "MYSQL_DATABASE",
"displayName": "MySQL Database Name",
"description": "Name of the MySQL database accessed.",
@@ -206,8 +245,8 @@
{
"name": "MYSQL_VERSION",
"displayName": "Version of MySQL Image",
- "description": "Version of MySQL image to be used (5.5, 5.6 or latest).",
- "value": "5.6",
+ "description": "Version of MySQL image to be used (5.5, 5.6, 5.7, or latest).",
+ "value": "5.7",
"required": true
}
]
diff --git a/roles/openshift_examples/files/examples/v1.3/db-templates/mysql-persistent-template.json b/roles/openshift_examples/files/examples/v1.3/db-templates/mysql-persistent-template.json
index fc7cd7d09..85c48da01 100644
--- a/roles/openshift_examples/files/examples/v1.3/db-templates/mysql-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.3/db-templates/mysql-persistent-template.json
@@ -5,17 +5,29 @@
"name": "mysql-persistent",
"annotations": {
"openshift.io/display-name": "MySQL (Persistent)",
- "description": "MySQL database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.6/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
+ "description": "MySQL database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.7/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-mysql-database",
"tags": "database,mysql"
}
},
- "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MYSQL_USER}\n Password: ${MYSQL_PASSWORD}\n Database Name: ${MYSQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:3306/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.6/README.md.",
+ "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MYSQL_USER}\n Password: ${MYSQL_PASSWORD}\n Database Name: ${MYSQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:3306/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.7/README.md.",
"labels": {
"template": "mysql-persistent-template"
},
"objects": [
{
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "stringData" : {
+ "database-user" : "${MYSQL_USER}",
+ "database-password" : "${MYSQL_PASSWORD}",
+ "database-root-password" : "${MYSQL_ROOT_PASSWORD}"
+ }
+ },
+ {
"kind": "Service",
"apiVersion": "v1",
"metadata": {
@@ -117,11 +129,30 @@
"env": [
{
"name": "MYSQL_USER",
- "value": "${MYSQL_USER}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-user"
+ }
+ }
},
{
"name": "MYSQL_PASSWORD",
- "value": "${MYSQL_PASSWORD}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-password"
+ }
+ }
+ },
+ {
+ "name": "MYSQL_ROOT_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-root-password"
+ }
+ }
},
{
"name": "MYSQL_DATABASE",
@@ -193,6 +224,14 @@
"required": true
},
{
+ "name": "MYSQL_ROOT_PASSWORD",
+ "displayName": "MySQL root user Password",
+ "description": "Password for the MySQL root user.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}",
+ "required": true
+ },
+ {
"name": "MYSQL_DATABASE",
"displayName": "MySQL Database Name",
"description": "Name of the MySQL database accessed.",
@@ -209,8 +248,8 @@
{
"name": "MYSQL_VERSION",
"displayName": "Version of MySQL Image",
- "description": "Version of MySQL image to be used (5.5, 5.6 or latest).",
- "value": "5.6",
+ "description": "Version of MySQL image to be used (5.5, 5.6, 5.7, or latest).",
+ "value": "5.7",
"required": true
}
]
diff --git a/roles/openshift_examples/files/examples/v1.3/db-templates/postgresql-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.3/db-templates/postgresql-ephemeral-template.json
index 505224b62..0d0a2a629 100644
--- a/roles/openshift_examples/files/examples/v1.3/db-templates/postgresql-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v1.3/db-templates/postgresql-ephemeral-template.json
@@ -17,6 +17,17 @@
},
"objects": [
{
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "stringData" : {
+ "database-user" : "${POSTGRESQL_USER}",
+ "database-password" : "${POSTGRESQL_PASSWORD}"
+ }
+ },
+ {
"kind": "Service",
"apiVersion": "v1",
"metadata": {
@@ -113,11 +124,21 @@
"env": [
{
"name": "POSTGRESQL_USER",
- "value": "${POSTGRESQL_USER}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-user"
+ }
+ }
},
{
"name": "POSTGRESQL_PASSWORD",
- "value": "${POSTGRESQL_PASSWORD}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-password"
+ }
+ }
},
{
"name": "POSTGRESQL_DATABASE",
diff --git a/roles/openshift_examples/files/examples/v1.3/db-templates/postgresql-persistent-template.json b/roles/openshift_examples/files/examples/v1.3/db-templates/postgresql-persistent-template.json
index 7ff49782b..257726cfd 100644
--- a/roles/openshift_examples/files/examples/v1.3/db-templates/postgresql-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.3/db-templates/postgresql-persistent-template.json
@@ -17,6 +17,17 @@
},
"objects": [
{
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "stringData" : {
+ "database-user" : "${POSTGRESQL_USER}",
+ "database-password" : "${POSTGRESQL_PASSWORD}"
+ }
+ },
+ {
"kind": "Service",
"apiVersion": "v1",
"metadata": {
@@ -130,11 +141,21 @@
"env": [
{
"name": "POSTGRESQL_USER",
- "value": "${POSTGRESQL_USER}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-user"
+ }
+ }
},
{
"name": "POSTGRESQL_PASSWORD",
- "value": "${POSTGRESQL_PASSWORD}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-password"
+ }
+ }
},
{
"name": "POSTGRESQL_DATABASE",
diff --git a/roles/openshift_examples/files/examples/v1.3/image-streams/image-streams-centos7.json b/roles/openshift_examples/files/examples/v1.3/image-streams/image-streams-centos7.json
index edaac73ca..1a90a9409 100644
--- a/roles/openshift_examples/files/examples/v1.3/image-streams/image-streams-centos7.json
+++ b/roles/openshift_examples/files/examples/v1.3/image-streams/image-streams-centos7.json
@@ -35,7 +35,7 @@
"openshift.io/display-name": "Ruby 2.0",
"description": "Build and run Ruby 2.0 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-ruby-container/tree/master/2.0/README.md.",
"iconClass": "icon-ruby",
- "tags": "builder,ruby",
+ "tags": "hidden,builder,ruby",
"supports": "ruby:2.0,ruby",
"version": "2.0",
"sampleRepo": "https://github.com/openshift/ruby-ex.git"
@@ -164,7 +164,7 @@
},
"from": {
"kind": "ImageStreamTag",
- "name": "5.20"
+ "name": "5.24"
}
},
{
@@ -173,7 +173,7 @@
"openshift.io/display-name": "Perl 5.16",
"description": "Build and run Perl 5.16 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-perl-container/blob/master/5.16/README.md.",
"iconClass": "icon-perl",
- "tags": "builder,perl",
+ "tags": "hidden,builder,perl",
"supports":"perl:5.16,perl",
"version": "5.16",
"sampleRepo": "https://github.com/openshift/dancer-ex.git"
@@ -198,7 +198,22 @@
"kind": "DockerImage",
"name": "centos/perl-520-centos7:latest"
}
-
+ },
+ {
+ "name": "5.24",
+ "annotations": {
+ "openshift.io/display-name": "Perl 5.24",
+ "description": "Build and run Perl 5.24 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-perl-container/blob/master/5.24/README.md.",
+ "iconClass": "icon-perl",
+ "tags": "builder,perl",
+ "supports":"perl:5.24,perl",
+ "version": "5.24",
+ "sampleRepo": "https://github.com/openshift/dancer-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/perl-524-centos7:latest"
+ }
}
]
}
@@ -226,7 +241,7 @@
},
"from": {
"kind": "ImageStreamTag",
- "name": "5.6"
+ "name": "7.0"
}
},
{
@@ -235,7 +250,7 @@
"openshift.io/display-name": "PHP 5.5",
"description": "Build and run PHP 5.5 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/5.5/README.md.",
"iconClass": "icon-php",
- "tags": "builder,php",
+ "tags": "hidden,builder,php",
"supports":"php:5.5,php",
"version": "5.5",
"sampleRepo": "https://github.com/openshift/cakephp-ex.git"
@@ -260,6 +275,22 @@
"kind": "DockerImage",
"name": "centos/php-56-centos7:latest"
}
+ },
+ {
+ "name": "7.0",
+ "annotations": {
+ "openshift.io/display-name": "PHP 7.0",
+ "description": "Build and run PHP 7.0 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/7.0/README.md.",
+ "iconClass": "icon-php",
+ "tags": "builder,php",
+ "supports":"php:7.0,php",
+ "version": "7.0",
+ "sampleRepo": "https://github.com/openshift/cakephp-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/php-70-centos7:latest"
+ }
}
]
}
@@ -296,7 +327,7 @@
"openshift.io/display-name": "Python 3.3",
"description": "Build and run Python 3.3 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.3/README.md.",
"iconClass": "icon-python",
- "tags": "builder,python",
+ "tags": "hidden,builder,python",
"supports":"python:3.3,python",
"version": "3.3",
"sampleRepo": "https://github.com/openshift/django-ex.git"
@@ -471,7 +502,7 @@
},
"from": {
"kind": "ImageStreamTag",
- "name": "5.6"
+ "name": "5.7"
}
},
{
@@ -480,7 +511,7 @@
"openshift.io/display-name": "MySQL 5.5",
"description": "Provides a MySQL 5.5 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mysql-container/tree/master/5.5/README.md.",
"iconClass": "icon-mysql-database",
- "tags": "mysql",
+ "tags": "hidden,mysql",
"version": "5.5"
},
"from": {
@@ -501,6 +532,20 @@
"kind": "DockerImage",
"name": "centos/mysql-56-centos7:latest"
}
+ },
+ {
+ "name": "5.7",
+ "annotations": {
+ "openshift.io/display-name": "MySQL 5.7",
+ "description": "Provides a MySQL 5.7 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mysql-container/tree/master/5.7/README.md.",
+ "iconClass": "icon-mysql-database",
+ "tags": "mysql",
+ "version": "5.7"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/mysql-57-centos7:latest"
+ }
}
]
}
@@ -576,7 +621,7 @@
"openshift.io/display-name": "PostgreSQL 9.2",
"description": "Provides a PostgreSQL 9.2 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.2.",
"iconClass": "icon-postgresql",
- "tags": "postgresql",
+ "tags": "hidden,postgresql",
"version": "9.2"
},
"from": {
@@ -645,7 +690,7 @@
"openshift.io/display-name": "MongoDB 2.4",
"description": "Provides a MongoDB 2.4 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/2.4/README.md.",
"iconClass": "icon-mongodb",
- "tags": "mongodb",
+ "tags": "hidden,mongodb",
"version": "2.4"
},
"from": {
@@ -688,6 +733,47 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
+ "name": "redis",
+ "annotations": {
+ "openshift.io/display-name": "Redis"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": "Redis (Latest)",
+ "description": "Provides a Redis database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/redis-container/tree/master/3.2/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Redis available on OpenShift, including major versions updates.",
+ "iconClass": "icon-redis",
+ "tags": "redis"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "3.2"
+ }
+ },
+ {
+ "name": "3.2",
+ "annotations": {
+ "openshift.io/display-name": "Redis 3.2",
+ "description": "Provides a Redis 3.2 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/redis-container/tree/master/3.2/README.md.",
+ "iconClass": "icon-redis",
+ "tags": "redis",
+ "version": "3.2"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/redis-32-centos7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
"name": "jenkins",
"annotations": {
"openshift.io/display-name": "Jenkins"
diff --git a/roles/openshift_examples/files/examples/v1.3/image-streams/image-streams-rhel7.json b/roles/openshift_examples/files/examples/v1.3/image-streams/image-streams-rhel7.json
index 88ee79a84..9b19b8bd0 100644
--- a/roles/openshift_examples/files/examples/v1.3/image-streams/image-streams-rhel7.json
+++ b/roles/openshift_examples/files/examples/v1.3/image-streams/image-streams-rhel7.json
@@ -35,7 +35,7 @@
"openshift.io/display-name": "Ruby 2.0",
"description": "Build and run Ruby 2.0 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-ruby-container/tree/master/2.0/README.md.",
"iconClass": "icon-ruby",
- "tags": "builder,ruby",
+ "tags": "hidden,builder,ruby",
"supports": "ruby:2.0,ruby",
"version": "2.0",
"sampleRepo": "https://github.com/openshift/ruby-ex.git"
@@ -164,7 +164,7 @@
},
"from": {
"kind": "ImageStreamTag",
- "name": "5.20"
+ "name": "5.24"
}
},
{
@@ -173,7 +173,7 @@
"openshift.io/display-name": "Perl 5.16",
"description": "Build and run Perl 5.16 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-perl-container/blob/master/5.16/README.md.",
"iconClass": "icon-perl",
- "tags": "builder,perl",
+ "tags": "hidden,builder,perl",
"supports":"perl:5.16,perl",
"version": "5.16",
"sampleRepo": "https://github.com/openshift/dancer-ex.git"
@@ -198,7 +198,22 @@
"kind": "DockerImage",
"name": "registry.access.redhat.com/rhscl/perl-520-rhel7:latest"
}
-
+ },
+ {
+ "name": "5.24",
+ "annotations": {
+ "openshift.io/display-name": "Perl 5.24",
+ "description": "Build and run Perl 5.24 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-perl-container/blob/master/5.24/README.md.",
+ "iconClass": "icon-perl",
+ "tags": "builder,perl",
+ "supports":"perl:5.24,perl",
+ "version": "5.24",
+ "sampleRepo": "https://github.com/openshift/dancer-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/perl-524-rhel7:latest"
+ }
}
]
}
@@ -235,7 +250,7 @@
"openshift.io/display-name": "PHP 5.5",
"description": "Build and run PHP 5.5 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/5.5/README.md.",
"iconClass": "icon-php",
- "tags": "builder,php",
+ "tags": "hidden,builder,php",
"supports":"php:5.5,php",
"version": "5.5",
"sampleRepo": "https://github.com/openshift/cakephp-ex.git"
@@ -260,6 +275,22 @@
"kind": "DockerImage",
"name": "registry.access.redhat.com/rhscl/php-56-rhel7:latest"
}
+ },
+ {
+ "name": "7.0",
+ "annotations": {
+ "openshift.io/display-name": "PHP 7.0",
+ "description": "Build and run PHP 7.0 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/7.0/README.md.",
+ "iconClass": "icon-php",
+ "tags": "builder,php",
+ "supports":"php:7.0,php",
+ "version": "7.0",
+ "sampleRepo": "https://github.com/openshift/cakephp-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/php-70-rhel7:latest"
+ }
}
]
}
@@ -296,7 +327,7 @@
"openshift.io/display-name": "Python 3.3",
"description": "Build and run Python 3.3 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.3/README.md.",
"iconClass": "icon-python",
- "tags": "builder,python",
+ "tags": "hidden,builder,python",
"supports":"python:3.3,python",
"version": "3.3",
"sampleRepo": "https://github.com/openshift/django-ex.git"
@@ -378,7 +409,7 @@
},
"from": {
"kind": "ImageStreamTag",
- "name": "5.6"
+ "name": "5.7"
}
},
{
@@ -387,7 +418,7 @@
"openshift.io/display-name": "MySQL 5.5",
"description": "Provides a MySQL 5.5 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mysql-container/tree/master/5.5/README.md.",
"iconClass": "icon-mysql-database",
- "tags": "mysql",
+ "tags": "hidden,mysql",
"version": "5.5"
},
"from": {
@@ -408,6 +439,20 @@
"kind": "DockerImage",
"name": "registry.access.redhat.com/rhscl/mysql-56-rhel7:latest"
}
+ },
+ {
+ "name": "5.7",
+ "annotations": {
+ "openshift.io/display-name": "MySQL 5.7",
+ "description": "Provides a MySQL 5.7 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mysql-container/tree/master/5.7/README.md.",
+ "iconClass": "icon-mysql-database",
+ "tags": "mysql",
+ "version": "5.7"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/mysql-57-rhel7:latest"
+ }
}
]
}
@@ -483,7 +528,7 @@
"openshift.io/display-name": "PostgreSQL 9.2",
"description": "Provides a PostgreSQL 9.2 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.2.",
"iconClass": "icon-postgresql",
- "tags": "postgresql",
+ "tags": "hidden,postgresql",
"version": "9.2"
},
"from": {
@@ -552,7 +597,7 @@
"openshift.io/display-name": "MongoDB 2.4",
"description": "Provides a MongoDB 2.4 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/2.4/README.md.",
"iconClass": "icon-mongodb",
- "tags": "mongodb",
+ "tags": "hidden,mongodb",
"version": "2.4"
},
"from": {
@@ -595,6 +640,47 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
+ "name": "redis",
+ "annotations": {
+ "openshift.io/display-name": "Redis"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": "Redis (Latest)",
+ "description": "Provides a Redis database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/redis-container/tree/master/3.2/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Redis available on OpenShift, including major versions updates.",
+ "iconClass": "icon-redis",
+ "tags": "redis"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "3.2"
+ }
+ },
+ {
+ "name": "3.2",
+ "annotations": {
+ "openshift.io/display-name": "Redis 3.2",
+ "description": "Provides a Redis 3.2 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/redis-container/tree/master/3.2/README.md.",
+ "iconClass": "icon-redis",
+ "tags": "redis",
+ "version": "3.2"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/redis-32-rhel7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
"name": "jenkins",
"annotations": {
"openshift.io/display-name": "Jenkins"
diff --git a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/README.md b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/README.md
index e0db922a2..62765e03d 100644
--- a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/README.md
+++ b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/README.md
@@ -19,5 +19,4 @@ instantiating them.
Note: This file is processed by `hack/update-external-examples.sh`. New examples
must follow the exact syntax of the existing entries. Files in this directory
-are automatically pulled down, do not add additional files directly to this
-directory.
+are automatically pulled down, do not modify/add files to this directory.
diff --git a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/apicast-gateway-template.yml b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/apicast-gateway-template.yml
new file mode 100644
index 000000000..34f5fcbcc
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/apicast-gateway-template.yml
@@ -0,0 +1,149 @@
+apiVersion: v1
+kind: Template
+metadata:
+ creationTimestamp: null
+ name: 3scale-gateway
+ annotations:
+ description: "3scale API Gateway"
+ iconClass: "icon-load-balancer"
+ tags: "api,gateway,3scale"
+objects:
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: ${THREESCALE_GATEWAY_NAME}
+ spec:
+ replicas: 2
+ selector:
+ deploymentconfig: ${THREESCALE_GATEWAY_NAME}
+ strategy:
+ resources: {}
+ rollingParams:
+ intervalSeconds: 1
+ maxSurge: 25%
+ maxUnavailable: 25%
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ deploymentconfig: ${THREESCALE_GATEWAY_NAME}
+ spec:
+ containers:
+ - env:
+ - name: THREESCALE_PORTAL_ENDPOINT
+ valueFrom:
+ secretKeyRef:
+ name: ${THREESCALE_PORTAL_ENDPOINT_SECRET}
+ key: password
+ - name: THREESCALE_CONFIG_FILE
+ value: ${THREESCALE_CONFIG_FILE}
+ - name: RESOLVER
+ value: ${RESOLVER}
+ - name: APICAST_SERVICES
+ value: ${APICAST_SERVICES}
+ - name: APICAST_MISSING_CONFIGURATION
+ value: ${MISSING_CONFIGURATION}
+ - name: APICAST_LOG_LEVEL
+ value: ${APICAST_LOG_LEVEL}
+ - name: APICAST_PATH_ROUTING_ENABLED
+ value: ${PATH_ROUTING}
+ - name: APICAST_RESPONSE_CODES
+ value: ${RESPONSE_CODES}
+ - name: APICAST_REQUEST_LOGS
+ value: ${REQUEST_LOGS}
+ - name: APICAST_RELOAD_CONFIG
+ value: ${APICAST_RELOAD_CONFIG}
+ image: ${THREESCALE_GATEWAY_IMAGE}
+ imagePullPolicy: Always
+ name: ${THREESCALE_GATEWAY_NAME}
+ livenessProbe:
+ httpGet:
+ path: /status/live
+ port: 8090
+ initialDelaySeconds: 10
+ timeoutSeconds: 1
+ readinessProbe:
+ httpGet:
+ path: /status/ready
+ port: 8090
+ initialDelaySeconds: 15
+ timeoutSeconds: 1
+ ports:
+ - containerPort: 8080
+ protocol: TCP
+ resources: {}
+ terminationMessagePath: /dev/termination-log
+ dnsPolicy: ClusterFirst
+ restartPolicy: Always
+ securityContext: {}
+ terminationGracePeriodSeconds: 30
+ triggers:
+ - type: ConfigChange
+ status: {}
+- apiVersion: v1
+ kind: Service
+ metadata:
+ creationTimestamp: null
+ name: ${THREESCALE_GATEWAY_NAME}
+ spec:
+ ports:
+ - name: 8080-tcp
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ selector:
+ deploymentconfig: ${THREESCALE_GATEWAY_NAME}
+ sessionAffinity: None
+ type: ClusterIP
+ status:
+ loadBalancer: {}
+parameters:
+- description: "Name of the secret containing the THREESCALE_PORTAL_ENDPOINT with the access-token or provider key"
+ value: threescale-portal-endpoint-secret
+ name: THREESCALE_PORTAL_ENDPOINT_SECRET
+ required: true
+- description: "Path to saved JSON file with configuration for the gateway. Has to be injected to the docker image as read only volume."
+ value:
+ name: THREESCALE_CONFIG_FILE
+ required: false
+- description: "Name for the 3scale API Gateway"
+ value: threescalegw
+ name: THREESCALE_GATEWAY_NAME
+ required: true
+- description: "Docker image to use."
+ value: 'rhamp10/apicast-gateway:1.0.0-4'
+ name: THREESCALE_GATEWAY_IMAGE
+ required: true
+- description: "DNS Resolver for openresty, if empty it will be autodiscovered"
+ value:
+ name: RESOLVER
+ required: false
+- description: "Subset of services to run. Use comma separated list of service ids (eg. 42,1337)"
+ value:
+ name: APICAST_SERVICES
+ required: false
+- description: "What to do on missing or invalid configuration. Allowed values are: log, exit."
+ value: exit
+ required: false
+ name: MISSING_CONFIGURATION
+- description: "Log level. One of the following: debug, info, notice, warn, error, crit, alert, or emerg."
+ name: APICAST_LOG_LEVEL
+ required: false
+- description: "Enable path routing. Experimental feature."
+ name: PATH_ROUTING
+ required: false
+ value: "false"
+- description: "Enable traffic logging to 3scale. Includes whole request and response."
+ value: "false"
+ name: REQUEST_LOGS
+ required: false
+- description: "Enable logging response codes to 3scale."
+ value: "false"
+ name: RESPONSE_CODES
+ required: false
+- description: "Reload config on every request"
+ value: "false"
+ name: APICAST_RELOAD_CONFIG
+ required: false
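
Editor's note: in the new apicast-gateway template above, the only secret-backed setting is THREESCALE_PORTAL_ENDPOINT, read via secretKeyRef from the secret named by ${THREESCALE_PORTAL_ENDPOINT_SECRET} (default threescale-portal-endpoint-secret) under the key "password", so that secret must already exist in the project for the DeploymentConfig to start. A minimal sketch of such a secret; the endpoint value shown is purely illustrative and not taken from this diff:

    apiVersion: v1
    kind: Secret
    metadata:
      name: threescale-portal-endpoint-secret   # default of THREESCALE_PORTAL_ENDPOINT_SECRET above
    stringData:
      password: "https://ACCESS-TOKEN@example-admin.3scale.net"   # hypothetical portal endpoint; key must be "password" to match the secretKeyRef
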
diff --git a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/cakephp-mysql.json b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/cakephp-mysql.json
index 354978891..9dbbf89d1 100644
--- a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/cakephp-mysql.json
+++ b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/cakephp-mysql.json
@@ -16,6 +16,17 @@
},
"objects": [
{
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "stringData" : {
+ "databaseUser" : "${DATABASE_USER}",
+ "databasePassword" : "${DATABASE_PASSWORD}"
+ }
+ },
+ {
"kind": "Service",
"apiVersion": "v1",
"metadata": {
@@ -212,11 +223,21 @@
},
{
"name": "DATABASE_USER",
- "value": "${DATABASE_USER}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databaseUser"
+ }
+ }
},
{
"name": "DATABASE_PASSWORD",
- "value": "${DATABASE_PASSWORD}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databasePassword"
+ }
+ }
},
{
"name": "CAKEPHP_SECRET_TOKEN",
@@ -349,12 +370,22 @@
},
"env": [
{
- "name": "MYSQL_USER",
- "value": "${DATABASE_USER}"
+ "name": "MYSQL_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databaseUser"
+ }
+ }
},
{
- "name": "MYSQL_PASSWORD",
- "value": "${DATABASE_PASSWORD}"
+ "name": "MYSQL_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databasePassword"
+ }
+ }
},
{
"name": "MYSQL_DATABASE",
diff --git a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/dancer-mysql.json b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/dancer-mysql.json
index 9fc5be5e0..dccb8bf7f 100644
--- a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/dancer-mysql.json
+++ b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/dancer-mysql.json
@@ -16,6 +16,17 @@
},
"objects": [
{
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "stringData" : {
+ "databaseUser" : "${DATABASE_USER}",
+ "databasePassword" : "${DATABASE_PASSWORD}"
+ }
+ },
+ {
"kind": "Service",
"apiVersion": "v1",
"metadata": {
@@ -115,7 +126,10 @@
"secret": "${GITHUB_WEBHOOK_SECRET}"
}
}
- ]
+ ],
+ "postCommit": {
+ "script": "perl -I extlib/lib/perl5 -I lib t/*"
+ }
}
},
{
@@ -190,11 +204,21 @@
},
{
"name": "MYSQL_USER",
- "value": "${DATABASE_USER}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databaseUser"
+ }
+ }
},
{
"name": "MYSQL_PASSWORD",
- "value": "${DATABASE_PASSWORD}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databasePassword"
+ }
+ }
},
{
"name": "MYSQL_DATABASE",
@@ -324,11 +348,21 @@
"env": [
{
"name": "MYSQL_USER",
- "value": "${DATABASE_USER}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databaseUser"
+ }
+ }
},
{
"name": "MYSQL_PASSWORD",
- "value": "${DATABASE_PASSWORD}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databasePassword"
+ }
+ }
},
{
"name": "MYSQL_DATABASE",
@@ -407,18 +441,6 @@
"from": "[a-zA-Z0-9]{40}"
},
{
- "name": "ADMIN_USERNAME",
- "displayName": "Administrator Username",
- "generate": "expression",
- "from": "admin[A-Z0-9]{3}"
- },
- {
- "name": "ADMIN_PASSWORD",
- "displayName": "Administrator Password",
- "generate": "expression",
- "from": "[a-zA-Z0-9]{8}"
- },
- {
"name": "DATABASE_SERVICE_NAME",
"displayName": "Database Service Name",
"required": true,
diff --git a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/django-postgresql.json b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/django-postgresql.json
index 590d5fd4f..59ff8a988 100644
--- a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/django-postgresql.json
+++ b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/django-postgresql.json
@@ -16,6 +16,17 @@
},
"objects": [
{
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "stringData" : {
+ "databaseUser" : "${DATABASE_USER}",
+ "databasePassword" : "${DATABASE_PASSWORD}"
+ }
+ },
+ {
"kind": "Service",
"apiVersion": "v1",
"metadata": {
@@ -204,11 +215,21 @@
},
{
"name": "DATABASE_USER",
- "value": "${DATABASE_USER}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databaseUser"
+ }
+ }
},
{
"name": "DATABASE_PASSWORD",
- "value": "${DATABASE_PASSWORD}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databasePassword"
+ }
+ }
},
{
"name": "APP_CONFIG",
@@ -314,11 +335,21 @@
"env": [
{
"name": "POSTGRESQL_USER",
- "value": "${DATABASE_USER}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databaseUser"
+ }
+ }
},
{
"name": "POSTGRESQL_PASSWORD",
- "value": "${DATABASE_PASSWORD}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databasePassword"
+ }
+ }
},
{
"name": "POSTGRESQL_DATABASE",
diff --git a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkins-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkins-ephemeral-template.json
index fc7423840..ab1c85b7e 100644
--- a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkins-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkins-ephemeral-template.json
@@ -89,7 +89,7 @@
},
"livenessProbe": {
"timeoutSeconds": 3,
- "initialDelaySeconds": 120,
+ "initialDelaySeconds": 420,
"failureThreshold" : 30,
"httpGet": {
"path": "/login",
@@ -98,14 +98,6 @@
},
"env": [
{
- "name": "OPENSHIFT_ENABLE_OAUTH",
- "value": "${ENABLE_OAUTH}"
- },
- {
- "name": "OPENSHIFT_ENABLE_REDIRECT_PROMPT",
- "value": "true"
- },
- {
"name": "KUBERNETES_MASTER",
"value": "https://kubernetes.default:443"
},
@@ -245,12 +237,6 @@
"value": "jenkins-jnlp"
},
{
- "name": "ENABLE_OAUTH",
- "displayName": "Enable OAuth in Jenkins",
- "description": "Whether to enable OAuth OpenShift integration. If false, the static account 'admin' will be initialized with the password 'password'.",
- "value": "true"
- },
- {
"name": "MEMORY_LIMIT",
"displayName": "Memory Limit",
"description": "Maximum amount of memory the container can use.",
diff --git a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkins-persistent-template.json b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkins-persistent-template.json
index acf59ee94..87c439ad2 100644
--- a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkins-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkins-persistent-template.json
@@ -106,7 +106,7 @@
},
"livenessProbe": {
"timeoutSeconds": 3,
- "initialDelaySeconds": 120,
+ "initialDelaySeconds": 420,
"failureThreshold" : 30,
"httpGet": {
"path": "/login",
@@ -115,14 +115,6 @@
},
"env": [
{
- "name": "OPENSHIFT_ENABLE_OAUTH",
- "value": "${ENABLE_OAUTH}"
- },
- {
- "name": "OPENSHIFT_ENABLE_REDIRECT_PROMPT",
- "value": "true"
- },
- {
"name": "KUBERNETES_MASTER",
"value": "https://kubernetes.default:443"
},
@@ -262,12 +254,6 @@
"value": "jenkins-jnlp"
},
{
- "name": "ENABLE_OAUTH",
- "displayName": "Enable OAuth in Jenkins",
- "description": "Whether to enable OAuth OpenShift integration. If false, the static account 'admin' will be initialized with the password 'password'.",
- "value": "true"
- },
- {
"name": "MEMORY_LIMIT",
"displayName": "Memory Limit",
"description": "Maximum amount of memory the container can use.",
diff --git a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/nodejs-mongodb.json b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/nodejs-mongodb.json
index d4b4add18..91f9ec7b3 100644
--- a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/nodejs-mongodb.json
+++ b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/nodejs-mongodb.json
@@ -16,6 +16,18 @@
},
"objects": [
{
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "stringData": {
+ "databaseUser": "${DATABASE_USER}",
+ "databasePassword": "${DATABASE_PASSWORD}",
+ "databaseAdminPassword" : "${DATABASE_ADMIN_PASSWORD}"
+ }
+ },
+ {
"kind": "Service",
"apiVersion": "v1",
"metadata": {
@@ -88,10 +100,10 @@
"namespace": "${NAMESPACE}",
"name": "nodejs:4"
},
- "env": [
+ "env": [
{
- "name": "NPM_MIRROR",
- "value": "${NPM_MIRROR}"
+ "name": "NPM_MIRROR",
+ "value": "${NPM_MIRROR}"
}
]
}
@@ -186,11 +198,21 @@
},
{
"name": "MONGODB_USER",
- "value": "${DATABASE_USER}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databaseUser"
+ }
+ }
},
{
"name": "MONGODB_PASSWORD",
- "value": "${DATABASE_PASSWORD}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databasePassword"
+ }
+ }
},
{
"name": "MONGODB_DATABASE",
@@ -198,7 +220,12 @@
},
{
"name": "MONGODB_ADMIN_PASSWORD",
- "value": "${DATABASE_ADMIN_PASSWORD}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databaseAdminPassword"
+ }
+ }
}
],
"readinessProbe": {
@@ -210,17 +237,17 @@
}
},
"livenessProbe": {
- "timeoutSeconds": 3,
- "initialDelaySeconds": 30,
- "httpGet": {
- "path": "/pagecount",
- "port": 8080
- }
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 30,
+ "httpGet": {
+ "path": "/pagecount",
+ "port": 8080
+ }
},
"resources": {
- "limits": {
- "memory": "${MEMORY_LIMIT}"
- }
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
}
}
]
@@ -306,11 +333,21 @@
"env": [
{
"name": "MONGODB_USER",
- "value": "${DATABASE_USER}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databaseUser"
+ }
+ }
},
{
"name": "MONGODB_PASSWORD",
- "value": "${DATABASE_PASSWORD}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databasePassword"
+ }
+ }
},
{
"name": "MONGODB_DATABASE",
@@ -318,14 +355,24 @@
},
{
"name": "MONGODB_ADMIN_PASSWORD",
- "value": "${DATABASE_ADMIN_PASSWORD}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databaseAdminPassword"
+ }
+ }
}
],
"readinessProbe": {
"timeoutSeconds": 1,
"initialDelaySeconds": 3,
"exec": {
- "command": [ "/bin/sh", "-i", "-c", "mongo 127.0.0.1:27017/$MONGODB_DATABASE -u $MONGODB_USER -p $MONGODB_PASSWORD --eval=\"quit()\""]
+ "command": [
+ "/bin/sh",
+ "-i",
+ "-c",
+ "mongo 127.0.0.1:27017/$MONGODB_DATABASE -u $MONGODB_USER -p $MONGODB_PASSWORD --eval=\"quit()\""
+ ]
}
},
"livenessProbe": {
@@ -336,9 +383,9 @@
}
},
"resources": {
- "limits": {
- "memory": "${MEMORY_MONGODB_LIMIT}"
- }
+ "limits": {
+ "memory": "${MEMORY_MONGODB_LIMIT}"
+ }
},
"volumeMounts": [
{
diff --git a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/rails-postgresql.json b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/rails-postgresql.json
index baed15d8a..6373562c4 100644
--- a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/rails-postgresql.json
+++ b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/rails-postgresql.json
@@ -16,6 +16,20 @@
},
"objects": [
{
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "stringData" : {
+ "databaseUser" : "${DATABASE_USER}",
+ "databasePassword" : "${DATABASE_PASSWORD}",
+ "applicationUser" : "${APPLICATION_USER}",
+ "applicationPassword" : "${APPLICATION_PASSWORD}",
+ "keyBase" : "${SECRET_KEY_BASE}"
+ }
+ },
+ {
"kind": "Service",
"apiVersion": "v1",
"metadata": {
@@ -207,11 +221,21 @@
},
{
"name": "POSTGRESQL_USER",
- "value": "${DATABASE_USER}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databaseUser"
+ }
+ }
},
{
"name": "POSTGRESQL_PASSWORD",
- "value": "${DATABASE_PASSWORD}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databasePassword"
+ }
+ }
},
{
"name": "POSTGRESQL_DATABASE",
@@ -219,7 +243,12 @@
},
{
"name": "SECRET_KEY_BASE",
- "value": "${SECRET_KEY_BASE}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "keyBase"
+ }
+ }
},
{
"name": "POSTGRESQL_MAX_CONNECTIONS",
@@ -235,11 +264,21 @@
},
{
"name": "APPLICATION_USER",
- "value": "${APPLICATION_USER}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "applicationUser"
+ }
+ }
},
{
"name": "APPLICATION_PASSWORD",
- "value": "${APPLICATION_PASSWORD}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "applicationPassword"
+ }
+ }
},
{
"name": "RAILS_ENV",
diff --git a/roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-pv-app-example.yaml b/roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-pv-app-example.yaml
new file mode 100644
index 000000000..14bdd1dca
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-pv-app-example.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: cloudforms
+spec:
+ capacity:
+ storage: 2Gi
+ accessModes:
+ - ReadWriteOnce
+ nfs:
+ path: /opt/nfs/volumes-app
+ server: 10.19.0.216
+ persistentVolumeReclaimPolicy: Recycle
diff --git a/roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-pv-example.yaml b/roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-pv-example.yaml
new file mode 100644
index 000000000..709d8d976
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-pv-example.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: nfs-pv01
+spec:
+ capacity:
+ storage: 2Gi
+ accessModes:
+ - ReadWriteOnce
+ nfs:
+ path: /opt/nfs/volumes
+ server: 10.19.0.216
+ persistentVolumeReclaimPolicy: Recycle
diff --git a/roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-template.yaml b/roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-template.yaml
new file mode 100644
index 000000000..c8e3d4083
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/cfme-templates/cfme-template.yaml
@@ -0,0 +1,479 @@
+apiVersion: v1
+kind: Template
+labels:
+ template: cloudforms
+metadata:
+ name: cloudforms
+ annotations:
+ description: "CloudForms appliance with persistent storage"
+ tags: "instant-app,cloudforms,cfme"
+ iconClass: "icon-rails"
+objects:
+- apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: "Exposes and load balances CloudForms pods"
+ service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"},{"name":"${MEMCACHED_SERVICE_NAME}","namespace":"","kind":"Service"}]'
+ name: ${NAME}
+ spec:
+ ports:
+ - name: http
+ port: 80
+ protocol: TCP
+ targetPort: 80
+ - name: https
+ port: 443
+ protocol: TCP
+ targetPort: 443
+ selector:
+ name: ${NAME}
+- apiVersion: v1
+ kind: Route
+ metadata:
+ name: ${NAME}
+ spec:
+ host: ${APPLICATION_DOMAIN}
+ port:
+ targetPort: https
+ tls:
+ termination: passthrough
+ to:
+ kind: Service
+ name: ${NAME}
+- apiVersion: v1
+ kind: ImageStream
+ metadata:
+ name: cfme-openshift-app
+ annotations:
+ description: "Keeps track of changes in the CloudForms app image"
+ spec:
+ dockerImageRepository: registry.access.redhat.com/cloudforms/cfme-openshift-app
+- apiVersion: v1
+ kind: PersistentVolumeClaim
+ metadata:
+ name: ${DATABASE_SERVICE_NAME}
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: ${DATABASE_VOLUME_CAPACITY}
+- apiVersion: v1
+ kind: PersistentVolumeClaim
+ metadata:
+ name: ${NAME}
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: ${APPLICATION_VOLUME_CAPACITY}
+- apiVersion: v1
+ kind: "DeploymentConfig"
+ metadata:
+ name: ${NAME}
+ annotations:
+ description: "Defines how to deploy the CloudForms appliance"
+ spec:
+ template:
+ metadata:
+ labels:
+ name: ${NAME}
+ name: ${NAME}
+ spec:
+ volumes:
+ -
+ name: "cfme-app-volume"
+ persistentVolumeClaim:
+ claimName: ${NAME}
+ containers:
+ - image: cloudforms/cfme-openshift-app:${APPLICATION_IMG_TAG}
+ imagePullPolicy: IfNotPresent
+ name: cloudforms
+ livenessProbe:
+ httpGet:
+ path: /
+ port: 80
+ initialDelaySeconds: 480
+ timeoutSeconds: 3
+ readinessProbe:
+ httpGet:
+ path: /
+ port: 80
+ initialDelaySeconds: 200
+ timeoutSeconds: 3
+ ports:
+ - containerPort: 80
+ protocol: TCP
+ - containerPort: 443
+ protocol: TCP
+ securityContext:
+ privileged: true
+ volumeMounts:
+ -
+ name: "cfme-app-volume"
+ mountPath: "/persistent"
+ env:
+ -
+ name: "APPLICATION_INIT_DELAY"
+ value: "${APPLICATION_INIT_DELAY}"
+ -
+ name: "DATABASE_SERVICE_NAME"
+ value: "${DATABASE_SERVICE_NAME}"
+ -
+ name: "DATABASE_REGION"
+ value: "${DATABASE_REGION}"
+ -
+ name: "MEMCACHED_SERVICE_NAME"
+ value: "${MEMCACHED_SERVICE_NAME}"
+ -
+ name: "POSTGRESQL_USER"
+ value: "${DATABASE_USER}"
+ -
+ name: "POSTGRESQL_PASSWORD"
+ value: "${DATABASE_PASSWORD}"
+ -
+ name: "POSTGRESQL_DATABASE"
+ value: "${DATABASE_NAME}"
+ -
+ name: "POSTGRESQL_MAX_CONNECTIONS"
+ value: "${POSTGRESQL_MAX_CONNECTIONS}"
+ -
+ name: "POSTGRESQL_SHARED_BUFFERS"
+ value: "${POSTGRESQL_SHARED_BUFFERS}"
+ resources:
+ requests:
+ memory: "${MEMORY_APPLICATION_MIN}"
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - /opt/rh/cfme-container-scripts/sync-pv-data
+ replicas: 1
+ selector:
+ name: ${NAME}
+ triggers:
+ - type: "ConfigChange"
+ - type: "ImageChange"
+ imageChangeParams:
+ automatic: false
+ containerNames:
+ - "cloudforms"
+ from:
+ kind: "ImageStreamTag"
+ name: "cfme-openshift-app:${APPLICATION_IMG_TAG}"
+ strategy:
+ type: "Recreate"
+ recreateParams:
+ timeoutSeconds: 1200
+- apiVersion: v1
+ kind: "Service"
+ metadata:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ annotations:
+ description: "Exposes the memcached server"
+ spec:
+ ports:
+ -
+ name: "memcached"
+ port: 11211
+ targetPort: 11211
+ selector:
+ name: "${MEMCACHED_SERVICE_NAME}"
+- apiVersion: v1
+ kind: ImageStream
+ metadata:
+ name: cfme-openshift-memcached
+ annotations:
+ description: "Keeps track of changes in the CloudForms memcached image"
+ spec:
+ dockerImageRepository: registry.access.redhat.com/cloudforms/cfme-openshift-memcached
+- apiVersion: v1
+ kind: "DeploymentConfig"
+ metadata:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ annotations:
+ description: "Defines how to deploy memcached"
+ spec:
+ strategy:
+ type: "Recreate"
+ triggers:
+ -
+ type: "ImageChange"
+ imageChangeParams:
+ automatic: false
+ containerNames:
+ - "memcached"
+ from:
+ kind: "ImageStreamTag"
+ name: "cfme-openshift-memcached:${MEMCACHED_IMG_TAG}"
+ -
+ type: "ConfigChange"
+ replicas: 1
+ selector:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ template:
+ metadata:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ labels:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ spec:
+ volumes: []
+ containers:
+ -
+ name: "memcached"
+ image: "cloudforms/cfme-openshift-memcached:${MEMCACHED_IMG_TAG}"
+ ports:
+ -
+ containerPort: 11211
+ readinessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 5
+ tcpSocket:
+ port: 11211
+ livenessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 30
+ tcpSocket:
+ port: 11211
+ volumeMounts: []
+ env:
+ -
+ name: "MEMCACHED_MAX_MEMORY"
+ value: "${MEMCACHED_MAX_MEMORY}"
+ -
+ name: "MEMCACHED_MAX_CONNECTIONS"
+ value: "${MEMCACHED_MAX_CONNECTIONS}"
+ -
+ name: "MEMCACHED_SLAB_PAGE_SIZE"
+ value: "${MEMCACHED_SLAB_PAGE_SIZE}"
+ resources:
+ limits:
+ memory: "${MEMORY_MEMCACHED_LIMIT}"
+- apiVersion: v1
+ kind: "Service"
+ metadata:
+ name: "${DATABASE_SERVICE_NAME}"
+ annotations:
+ description: "Exposes the database server"
+ spec:
+ ports:
+ -
+ name: "postgresql"
+ port: 5432
+ targetPort: 5432
+ selector:
+ name: "${DATABASE_SERVICE_NAME}"
+- apiVersion: v1
+ kind: ImageStream
+ metadata:
+ name: cfme-openshift-postgresql
+ annotations:
+ description: "Keeps track of changes in the CloudForms postgresql image"
+ spec:
+ dockerImageRepository: registry.access.redhat.com/cloudforms/cfme-openshift-postgresql
+- apiVersion: v1
+ kind: "DeploymentConfig"
+ metadata:
+ name: "${DATABASE_SERVICE_NAME}"
+ annotations:
+ description: "Defines how to deploy the database"
+ spec:
+ strategy:
+ type: "Recreate"
+ triggers:
+ -
+ type: "ImageChange"
+ imageChangeParams:
+ automatic: false
+ containerNames:
+ - "postgresql"
+ from:
+ kind: "ImageStreamTag"
+ name: "cfme-openshift-postgresql:${POSTGRESQL_IMG_TAG}"
+ -
+ type: "ConfigChange"
+ replicas: 1
+ selector:
+ name: "${DATABASE_SERVICE_NAME}"
+ template:
+ metadata:
+ name: "${DATABASE_SERVICE_NAME}"
+ labels:
+ name: "${DATABASE_SERVICE_NAME}"
+ spec:
+ volumes:
+ -
+ name: "cfme-pgdb-volume"
+ persistentVolumeClaim:
+ claimName: ${DATABASE_SERVICE_NAME}
+ containers:
+ -
+ name: "postgresql"
+ image: "cloudforms/cfme-openshift-postgresql:${POSTGRESQL_IMG_TAG}"
+ ports:
+ -
+ containerPort: 5432
+ readinessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 15
+ exec:
+ command:
+ - "/bin/sh"
+ - "-i"
+ - "-c"
+ - "psql -h 127.0.0.1 -U ${POSTGRESQL_USER} -q -d ${POSTGRESQL_DATABASE} -c 'SELECT 1'"
+ livenessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 60
+ tcpSocket:
+ port: 5432
+ volumeMounts:
+ -
+ name: "cfme-pgdb-volume"
+ mountPath: "/var/lib/pgsql/data"
+ env:
+ -
+ name: "POSTGRESQL_USER"
+ value: "${DATABASE_USER}"
+ -
+ name: "POSTGRESQL_PASSWORD"
+ value: "${DATABASE_PASSWORD}"
+ -
+ name: "POSTGRESQL_DATABASE"
+ value: "${DATABASE_NAME}"
+ -
+ name: "POSTGRESQL_MAX_CONNECTIONS"
+ value: "${POSTGRESQL_MAX_CONNECTIONS}"
+ -
+ name: "POSTGRESQL_SHARED_BUFFERS"
+ value: "${POSTGRESQL_SHARED_BUFFERS}"
+ resources:
+ limits:
+ memory: "${MEMORY_POSTGRESQL_LIMIT}"
+
+parameters:
+ -
+ name: "NAME"
+ displayName: Name
+ required: true
+ description: "The name assigned to all of the frontend objects defined in this template."
+ value: cloudforms
+ -
+ name: "DATABASE_SERVICE_NAME"
+ displayName: "PostgreSQL Service Name"
+ required: true
+ description: "The name of the OpenShift Service exposed for the PostgreSQL container."
+ value: "postgresql"
+ -
+ name: "DATABASE_USER"
+ displayName: "PostgreSQL User"
+ required: true
+ description: "PostgreSQL user that will access the database."
+ value: "root"
+ -
+ name: "DATABASE_PASSWORD"
+ displayName: "PostgreSQL Password"
+ required: true
+ description: "Password for the PostgreSQL user."
+ value: "smartvm"
+ -
+ name: "DATABASE_NAME"
+ required: true
+ displayName: "PostgreSQL Database Name"
+ description: "Name of the PostgreSQL database accessed."
+ value: "vmdb_production"
+ -
+ name: "DATABASE_REGION"
+ required: true
+ displayName: "Application Database Region"
+ description: "Database region that will be used for application."
+ value: "0"
+ -
+ name: "MEMCACHED_SERVICE_NAME"
+ required: true
+ displayName: "Memcached Service Name"
+ description: "The name of the OpenShift Service exposed for the Memcached container."
+ value: "memcached"
+ -
+ name: "MEMCACHED_MAX_MEMORY"
+ displayName: "Memcached Max Memory"
+ description: "Memcached maximum memory for memcached object storage in MB."
+ value: "64"
+ -
+ name: "MEMCACHED_MAX_CONNECTIONS"
+ displayName: "Memcached Max Connections"
+ description: "Memcached maximum number of connections allowed."
+ value: "1024"
+ -
+ name: "MEMCACHED_SLAB_PAGE_SIZE"
+ displayName: "Memcached Slab Page Size"
+ description: "Memcached size of each slab page."
+ value: "1m"
+ -
+ name: "POSTGRESQL_MAX_CONNECTIONS"
+ displayName: "PostgreSQL Max Connections"
+ description: "PostgreSQL maximum number of database connections allowed."
+ value: "100"
+ -
+ name: "POSTGRESQL_SHARED_BUFFERS"
+ displayName: "PostgreSQL Shared Buffer Amount"
+ description: "Amount of memory dedicated for PostgreSQL shared memory buffers."
+ value: "64MB"
+ -
+ name: "MEMORY_APPLICATION_MIN"
+ displayName: "Application Memory Minimum"
+ required: true
+ description: "Minimum amount of memory the Application container will need."
+ value: "4096Mi"
+ -
+ name: "MEMORY_POSTGRESQL_LIMIT"
+ displayName: "PostgreSQL Memory Limit"
+ required: true
+ description: "Maximum amount of memory the PostgreSQL container can use."
+ value: "2048Mi"
+ -
+ name: "MEMORY_MEMCACHED_LIMIT"
+ displayName: "Memcached Memory Limit"
+ required: true
+ description: "Maximum amount of memory the Memcached container can use."
+ value: "256Mi"
+ -
+ name: "POSTGRESQL_IMG_TAG"
+ displayName: "PostgreSQL Image Tag"
+ description: "This is the PostgreSQL image tag/version requested to deploy."
+ value: "latest"
+ -
+ name: "MEMCACHED_IMG_TAG"
+ displayName: "Memcached Image Tag"
+ description: "This is the Memcached image tag/version requested to deploy."
+ value: "latest"
+ -
+ name: "APPLICATION_IMG_TAG"
+ displayName: "Application Image Tag"
+ description: "This is the Application image tag/version requested to deploy."
+ value: "latest"
+ -
+ name: "APPLICATION_DOMAIN"
+ displayName: "Application Hostname"
+ description: "The exposed hostname that will route to the application service, if left blank a value will be defaulted."
+ value: ""
+ -
+ name: "APPLICATION_INIT_DELAY"
+ displayName: "Application Init Delay"
+ required: true
+ description: "Delay in seconds before we attempt to initialize the application."
+ value: "30"
+ -
+ name: "APPLICATION_VOLUME_CAPACITY"
+ displayName: "Application Volume Capacity"
+ required: true
+ description: "Volume space available for application data."
+ value: "1Gi"
+ -
+ name: "DATABASE_VOLUME_CAPACITY"
+ displayName: "Database Volume Capacity"
+ required: true
+ description: "Volume space available for database."
+ value: "1Gi"
diff --git a/roles/openshift_examples/files/examples/v1.4/db-templates/mariadb-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.4/db-templates/mariadb-ephemeral-template.json
index 8e43bfbc3..cfbfc3e20 100644
--- a/roles/openshift_examples/files/examples/v1.4/db-templates/mariadb-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v1.4/db-templates/mariadb-ephemeral-template.json
@@ -16,6 +16,18 @@
},
"objects": [
{
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "stringData" : {
+ "database-user" : "${MYSQL_USER}",
+ "database-password" : "${MYSQL_PASSWORD}",
+ "database-root-password" : "${MYSQL_ROOT_PASSWORD}"
+ }
+ },
+ {
"kind": "Service",
"apiVersion": "v1",
"metadata": {
@@ -100,11 +112,30 @@
"env": [
{
"name": "MYSQL_USER",
- "value": "${MYSQL_USER}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-user"
+ }
+ }
},
{
"name": "MYSQL_PASSWORD",
- "value": "${MYSQL_PASSWORD}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-password"
+ }
+ }
+ },
+ {
+ "name": "MYSQL_ROOT_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-root-password"
+ }
+ }
},
{
"name": "MYSQL_DATABASE",
@@ -176,6 +207,14 @@
"required": true
},
{
+ "name": "MYSQL_ROOT_PASSWORD",
+ "displayName": "MariaDB root Password",
+ "description": "Password for the MariaDB root user.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}",
+ "required": true
+ },
+ {
"name": "MYSQL_DATABASE",
"displayName": "MariaDB Database Name",
"description": "Name of the MariaDB database accessed.",
diff --git a/roles/openshift_examples/files/examples/v1.4/db-templates/mariadb-persistent-template.json b/roles/openshift_examples/files/examples/v1.4/db-templates/mariadb-persistent-template.json
index bc85277a9..e933eecf0 100644
--- a/roles/openshift_examples/files/examples/v1.4/db-templates/mariadb-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.4/db-templates/mariadb-persistent-template.json
@@ -16,6 +16,18 @@
},
"objects": [
{
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "stringData" : {
+ "database-user" : "${MYSQL_USER}",
+ "database-password" : "${MYSQL_PASSWORD}",
+ "database-root-password" : "${MYSQL_ROOT_PASSWORD}"
+ }
+ },
+ {
"kind": "Service",
"apiVersion": "v1",
"metadata": {
@@ -117,11 +129,30 @@
"env": [
{
"name": "MYSQL_USER",
- "value": "${MYSQL_USER}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-user"
+ }
+ }
},
{
"name": "MYSQL_PASSWORD",
- "value": "${MYSQL_PASSWORD}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-password"
+ }
+ }
+ },
+ {
+ "name": "MYSQL_ROOT_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-root-password"
+ }
+ }
},
{
"name": "MYSQL_DATABASE",
@@ -193,6 +224,14 @@
"required": true
},
{
+ "name": "MYSQL_ROOT_PASSWORD",
+ "displayName": "MariaDB root Password",
+ "description": "Password for the MariaDB root user.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}",
+ "required": true
+ },
+ {
"name": "MYSQL_DATABASE",
"displayName": "MariaDB Database Name",
"description": "Name of the MariaDB database accessed.",
diff --git a/roles/openshift_examples/files/examples/v1.4/db-templates/mongodb-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.4/db-templates/mongodb-ephemeral-template.json
index 605601ef2..8b8fcb58b 100644
--- a/roles/openshift_examples/files/examples/v1.4/db-templates/mongodb-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v1.4/db-templates/mongodb-ephemeral-template.json
@@ -17,6 +17,18 @@
},
"objects": [
{
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "stringData" : {
+ "database-user" : "${MONGODB_USER}",
+ "database-password" : "${MONGODB_PASSWORD}",
+ "database-admin-password" : "${MONGODB_ADMIN_PASSWORD}"
+ }
+ },
+ {
"kind": "Service",
"apiVersion": "v1",
"metadata": {
@@ -113,19 +125,34 @@
"env": [
{
"name": "MONGODB_USER",
- "value": "${MONGODB_USER}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-user"
+ }
+ }
},
{
"name": "MONGODB_PASSWORD",
- "value": "${MONGODB_PASSWORD}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-password"
+ }
+ }
},
{
- "name": "MONGODB_DATABASE",
- "value": "${MONGODB_DATABASE}"
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-admin-password"
+ }
+ }
},
{
- "name": "MONGODB_ADMIN_PASSWORD",
- "value": "${MONGODB_ADMIN_PASSWORD}"
+ "name": "MONGODB_DATABASE",
+ "value": "${MONGODB_DATABASE}"
}
],
"resources": {
diff --git a/roles/openshift_examples/files/examples/v1.4/db-templates/mongodb-persistent-template.json b/roles/openshift_examples/files/examples/v1.4/db-templates/mongodb-persistent-template.json
index d2a0d01f0..72d3a8556 100644
--- a/roles/openshift_examples/files/examples/v1.4/db-templates/mongodb-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.4/db-templates/mongodb-persistent-template.json
@@ -17,6 +17,18 @@
},
"objects": [
{
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "stringData" : {
+ "database-user" : "${MONGODB_USER}",
+ "database-password" : "${MONGODB_PASSWORD}",
+ "database-admin-password" : "${MONGODB_ADMIN_PASSWORD}"
+ }
+ },
+ {
"kind": "Service",
"apiVersion": "v1",
"metadata": {
@@ -130,19 +142,34 @@
"env": [
{
"name": "MONGODB_USER",
- "value": "${MONGODB_USER}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-user"
+ }
+ }
},
{
"name": "MONGODB_PASSWORD",
- "value": "${MONGODB_PASSWORD}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-password"
+ }
+ }
},
{
- "name": "MONGODB_DATABASE",
- "value": "${MONGODB_DATABASE}"
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-admin-password"
+ }
+ }
},
{
- "name": "MONGODB_ADMIN_PASSWORD",
- "value": "${MONGODB_ADMIN_PASSWORD}"
+ "name": "MONGODB_DATABASE",
+ "value": "${MONGODB_DATABASE}"
}
],
"resources": {
diff --git a/roles/openshift_examples/files/examples/v1.4/db-templates/mysql-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.4/db-templates/mysql-ephemeral-template.json
index 0cea42f8b..34dd2ed78 100644
--- a/roles/openshift_examples/files/examples/v1.4/db-templates/mysql-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v1.4/db-templates/mysql-ephemeral-template.json
@@ -5,16 +5,28 @@
"name": "mysql-ephemeral",
"annotations": {
"openshift.io/display-name": "MySQL (Ephemeral)",
- "description": "MySQL database service, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.6/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing",
+ "description": "MySQL database service, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.7/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing",
"iconClass": "icon-mysql-database",
"tags": "database,mysql"
}
},
- "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MYSQL_USER}\n Password: ${MYSQL_PASSWORD}\n Database Name: ${MYSQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:3306/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.6/README.md.",
+ "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MYSQL_USER}\n Password: ${MYSQL_PASSWORD}\n Database Name: ${MYSQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:3306/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.7/README.md.",
"labels": {
"template": "mysql-ephemeral-template"
},
"objects": [
+ {
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "stringData" : {
+ "database-user" : "${MYSQL_USER}",
+ "database-password" : "${MYSQL_PASSWORD}",
+ "database-root-password" : "${MYSQL_ROOT_PASSWORD}"
+ }
+ },
{
"kind": "Service",
"apiVersion": "v1",
@@ -113,11 +125,30 @@
"env": [
{
"name": "MYSQL_USER",
- "value": "${MYSQL_USER}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-user"
+ }
+ }
},
{
"name": "MYSQL_PASSWORD",
- "value": "${MYSQL_PASSWORD}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-password"
+ }
+ }
+ },
+ {
+ "name": "MYSQL_ROOT_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-root-password"
+ }
+ }
},
{
"name": "MYSQL_DATABASE",
@@ -197,6 +228,14 @@
"required": true
},
{
+ "name": "MYSQL_ROOT_PASSWORD",
+ "displayName": "MySQL root user Password",
+ "description": "Password for the MySQL root user.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}",
+ "required": true
+ },
+ {
"name": "MYSQL_DATABASE",
"displayName": "MySQL Database Name",
"description": "Name of the MySQL database accessed.",
@@ -206,8 +245,8 @@
{
"name": "MYSQL_VERSION",
"displayName": "Version of MySQL Image",
- "description": "Version of MySQL image to be used (5.5, 5.6 or latest).",
- "value": "5.6",
+ "description": "Version of MySQL image to be used (5.5, 5.6, 5.7, or latest).",
+ "value": "5.7",
"required": true
}
]
diff --git a/roles/openshift_examples/files/examples/v1.4/db-templates/mysql-persistent-template.json b/roles/openshift_examples/files/examples/v1.4/db-templates/mysql-persistent-template.json
index fc7cd7d09..85c48da01 100644
--- a/roles/openshift_examples/files/examples/v1.4/db-templates/mysql-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.4/db-templates/mysql-persistent-template.json
@@ -5,17 +5,29 @@
"name": "mysql-persistent",
"annotations": {
"openshift.io/display-name": "MySQL (Persistent)",
- "description": "MySQL database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.6/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
+ "description": "MySQL database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.7/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-mysql-database",
"tags": "database,mysql"
}
},
- "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MYSQL_USER}\n Password: ${MYSQL_PASSWORD}\n Database Name: ${MYSQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:3306/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.6/README.md.",
+ "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MYSQL_USER}\n Password: ${MYSQL_PASSWORD}\n Database Name: ${MYSQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:3306/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.7/README.md.",
"labels": {
"template": "mysql-persistent-template"
},
"objects": [
{
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "stringData" : {
+ "database-user" : "${MYSQL_USER}",
+ "database-password" : "${MYSQL_PASSWORD}",
+ "database-root-password" : "${MYSQL_ROOT_PASSWORD}"
+ }
+ },
+ {
"kind": "Service",
"apiVersion": "v1",
"metadata": {
@@ -117,11 +129,30 @@
"env": [
{
"name": "MYSQL_USER",
- "value": "${MYSQL_USER}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-user"
+ }
+ }
},
{
"name": "MYSQL_PASSWORD",
- "value": "${MYSQL_PASSWORD}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-password"
+ }
+ }
+ },
+ {
+ "name": "MYSQL_ROOT_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-root-password"
+ }
+ }
},
{
"name": "MYSQL_DATABASE",
@@ -193,6 +224,14 @@
"required": true
},
{
+ "name": "MYSQL_ROOT_PASSWORD",
+ "displayName": "MySQL root user Password",
+ "description": "Password for the MySQL root user.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}",
+ "required": true
+ },
+ {
"name": "MYSQL_DATABASE",
"displayName": "MySQL Database Name",
"description": "Name of the MySQL database accessed.",
@@ -209,8 +248,8 @@
{
"name": "MYSQL_VERSION",
"displayName": "Version of MySQL Image",
- "description": "Version of MySQL image to be used (5.5, 5.6 or latest).",
- "value": "5.6",
+ "description": "Version of MySQL image to be used (5.5, 5.6, 5.7, or latest).",
+ "value": "5.7",
"required": true
}
]
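
The change above (for both MySQL templates) replaces inline MYSQL_* values with a Secret that the DeploymentConfig consumes through secretKeyRef. A minimal sketch of checking the effect after instantiation, assuming the template has been loaded under its metadata name and keeps the default DATABASE_SERVICE_NAME of mysql (the parameter defaults are not visible in this hunk):

    $ oc new-app mysql-persistent -p MYSQL_USER=appuser -p MYSQL_DATABASE=appdb
    $ oc get secret mysql -o yaml      # database-user, database-password, database-root-password
    $ oc set env dc/mysql --list       # the MYSQL_* variables should now be reported as coming from the secret
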
diff --git a/roles/openshift_examples/files/examples/v1.4/db-templates/postgresql-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.4/db-templates/postgresql-ephemeral-template.json
index 505224b62..0d0a2a629 100644
--- a/roles/openshift_examples/files/examples/v1.4/db-templates/postgresql-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v1.4/db-templates/postgresql-ephemeral-template.json
@@ -17,6 +17,17 @@
},
"objects": [
{
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "stringData" : {
+ "database-user" : "${POSTGRESQL_USER}",
+ "database-password" : "${POSTGRESQL_PASSWORD}"
+ }
+ },
+ {
"kind": "Service",
"apiVersion": "v1",
"metadata": {
@@ -113,11 +124,21 @@
"env": [
{
"name": "POSTGRESQL_USER",
- "value": "${POSTGRESQL_USER}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-user"
+ }
+ }
},
{
"name": "POSTGRESQL_PASSWORD",
- "value": "${POSTGRESQL_PASSWORD}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-password"
+ }
+ }
},
{
"name": "POSTGRESQL_DATABASE",
diff --git a/roles/openshift_examples/files/examples/v1.4/db-templates/postgresql-persistent-template.json b/roles/openshift_examples/files/examples/v1.4/db-templates/postgresql-persistent-template.json
index 7ff49782b..257726cfd 100644
--- a/roles/openshift_examples/files/examples/v1.4/db-templates/postgresql-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.4/db-templates/postgresql-persistent-template.json
@@ -17,6 +17,17 @@
},
"objects": [
{
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "stringData" : {
+ "database-user" : "${POSTGRESQL_USER}",
+ "database-password" : "${POSTGRESQL_PASSWORD}"
+ }
+ },
+ {
"kind": "Service",
"apiVersion": "v1",
"metadata": {
@@ -130,11 +141,21 @@
"env": [
{
"name": "POSTGRESQL_USER",
- "value": "${POSTGRESQL_USER}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-user"
+ }
+ }
},
{
"name": "POSTGRESQL_PASSWORD",
- "value": "${POSTGRESQL_PASSWORD}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-password"
+ }
+ }
},
{
"name": "POSTGRESQL_DATABASE",
diff --git a/roles/openshift_examples/files/examples/v1.4/db-templates/redis-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.4/db-templates/redis-ephemeral-template.json
new file mode 100644
index 000000000..c9ae8a539
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/db-templates/redis-ephemeral-template.json
@@ -0,0 +1,191 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "redis-ephemeral",
+ "creationTimestamp": null,
+ "annotations": {
+ "openshift.io/display-name": "Redis (Ephemeral)",
+ "description": "Redis in-memory data structure store, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/redis-container/blob/master/3.2.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing",
+ "iconClass": "icon-redis",
+ "tags": "database,redis"
+ }
+ },
+ "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Password: ${REDIS_PASSWORD}\n Connection URL: redis://${DATABASE_SERVICE_NAME}:6379/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/redis-container/blob/master/3.2.",
+ "labels": {
+ "template": "redis-ephemeral-template"
+ },
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "redis",
+ "protocol": "TCP",
+ "port": 6379,
+ "targetPort": 6379,
+ "nodePort": 0
+ }
+ ],
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "type": "ClusterIP",
+ "sessionAffinity": "None"
+ },
+ "status": {
+ "loadBalancer": {}
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "redis"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "redis:${REDIS_VERSION}",
+ "namespace": "${NAMESPACE}"
+ },
+ "lastTriggeredImage": ""
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "redis",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 6379,
+ "protocol": "TCP"
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 5,
+ "exec": {
+ "command": [ "/bin/sh", "-i", "-c", "test \"$(redis-cli -h 127.0.0.1 -a $REDIS_PASSWORD ping)\" == \"PONG\""]
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 30,
+ "tcpSocket": {
+ "port": 6379
+ }
+ },
+ "env": [
+ {
+ "name": "REDIS_PASSWORD",
+ "value": "${REDIS_PASSWORD}"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "mountPath": "/var/lib/redis/data"
+ }
+ ],
+ "terminationMessagePath": "/dev/termination-log",
+ "imagePullPolicy": "IfNotPresent",
+ "capabilities": {},
+ "securityContext": {
+ "capabilities": {},
+ "privileged": false
+ }
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "emptyDir": {
+ "medium": ""
+ }
+ }
+ ],
+ "restartPolicy": "Always",
+ "dnsPolicy": "ClusterFirst"
+ }
+ }
+ },
+ "status": {}
+ }
+ ],
+ "parameters": [
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "description": "Maximum amount of memory the container can use.",
+ "value": "512Mi"
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Namespace",
+ "description": "The OpenShift Namespace where the ImageStream resides.",
+ "value": "openshift"
+ },
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "displayName": "Database Service Name",
+ "description": "The name of the OpenShift Service exposed for the database.",
+ "value": "redis",
+ "required": true
+ },
+ {
+ "name": "REDIS_PASSWORD",
+ "displayName": "Redis Connection Password",
+ "description": "Password for the Redis connection user.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}",
+ "required": true
+ },
+ {
+ "name": "REDIS_VERSION",
+ "displayName": "Version of Redis Image",
+ "description": "Version of Redis image to be used (3.2 or latest).",
+ "value": "3.2",
+ "required": true
+ }
+ ]
+}
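
The new redis-ephemeral template can be exercised directly from this file. A minimal sketch, using the DATABASE_SERVICE_NAME default of redis from the parameter block above (REDIS_PASSWORD is normally auto-generated):

    $ oc create -f redis-ephemeral-template.json
    $ oc new-app redis-ephemeral
    $ oc get pods -l name=redis        # emptyDir storage, so data does not survive a pod restart
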
diff --git a/roles/openshift_examples/files/examples/v1.4/db-templates/redis-persistent-template.json b/roles/openshift_examples/files/examples/v1.4/db-templates/redis-persistent-template.json
new file mode 100644
index 000000000..e9db9ec9d
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/db-templates/redis-persistent-template.json
@@ -0,0 +1,215 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "redis-persistent",
+ "creationTimestamp": null,
+ "annotations": {
+ "openshift.io/display-name": "Redis (Persistent)",
+ "description": "Redis in-memory data structure store, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/redis-container/blob/master/3.2.\n\nNOTE: You must have persistent volumes available in your cluster to use this template.",
+ "iconClass": "icon-redis",
+ "tags": "database,redis"
+ }
+ },
+ "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Password: ${REDIS_PASSWORD}\n Connection URL: redis://${DATABASE_SERVICE_NAME}:6379/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/redis-container/blob/master/3.2.",
+ "labels": {
+ "template": "redis-persistent-template"
+ },
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "redis",
+ "protocol": "TCP",
+ "port": 6379,
+ "targetPort": 6379,
+ "nodePort": 0
+ }
+ ],
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "type": "ClusterIP",
+ "sessionAffinity": "None"
+ },
+ "status": {
+ "loadBalancer": {}
+ }
+ },
+ {
+ "kind": "PersistentVolumeClaim",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "redis"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "redis:${REDIS_VERSION}",
+ "namespace": "${NAMESPACE}"
+ },
+ "lastTriggeredImage": ""
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "redis",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 6379,
+ "protocol": "TCP"
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 5,
+ "exec": {
+ "command": [ "/bin/sh", "-i", "-c", "test \"$(redis-cli -h 127.0.0.1 -a $REDIS_PASSWORD ping)\" == \"PONG\""]
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 30,
+ "tcpSocket": {
+ "port": 6379
+ }
+ },
+ "env": [
+ {
+ "name": "REDIS_PASSWORD",
+ "value": "${REDIS_PASSWORD}"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "mountPath": "/var/lib/redis/data"
+ }
+ ],
+ "terminationMessagePath": "/dev/termination-log",
+ "imagePullPolicy": "IfNotPresent",
+ "capabilities": {},
+ "securityContext": {
+ "capabilities": {},
+ "privileged": false
+ }
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "persistentVolumeClaim": {
+ "claimName": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ ],
+ "restartPolicy": "Always",
+ "dnsPolicy": "ClusterFirst"
+ }
+ }
+ },
+ "status": {}
+ }
+ ],
+ "parameters": [
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "description": "Maximum amount of memory the container can use.",
+ "value": "512Mi"
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Namespace",
+ "description": "The OpenShift Namespace where the ImageStream resides.",
+ "value": "openshift"
+ },
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "displayName": "Database Service Name",
+ "description": "The name of the OpenShift Service exposed for the database.",
+ "value": "redis",
+ "required": true
+ },
+ {
+ "name": "REDIS_PASSWORD",
+ "displayName": "Redis Connection Password",
+ "description": "Password for the Redis connection user.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}",
+ "required": true
+ },
+ {
+ "name": "VOLUME_CAPACITY",
+ "displayName": "Volume Capacity",
+ "description": "Volume space available for data, e.g. 512Mi, 2Gi.",
+ "value": "1Gi",
+ "required": true
+ },
+ {
+ "name": "REDIS_VERSION",
+ "displayName": "Version of Redis Image",
+ "description": "Version of Redis image to be used (3.2 or latest).",
+ "value": "3.2",
+ "required": true
+ }
+ ]
+}
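
The persistent variant differs mainly in the PersistentVolumeClaim it adds, so a matching persistent volume must exist before the deployment can progress. A minimal sketch using the defaults shown above, assuming the template has been loaded as in the ephemeral example:

    $ oc new-app redis-persistent -p VOLUME_CAPACITY=2Gi
    $ oc get pvc redis                 # must reach Bound before the redis pod can start
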
diff --git a/roles/openshift_examples/files/examples/v1.4/image-streams/image-streams-centos7.json b/roles/openshift_examples/files/examples/v1.4/image-streams/image-streams-centos7.json
index edaac73ca..1a90a9409 100644
--- a/roles/openshift_examples/files/examples/v1.4/image-streams/image-streams-centos7.json
+++ b/roles/openshift_examples/files/examples/v1.4/image-streams/image-streams-centos7.json
@@ -35,7 +35,7 @@
"openshift.io/display-name": "Ruby 2.0",
"description": "Build and run Ruby 2.0 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-ruby-container/tree/master/2.0/README.md.",
"iconClass": "icon-ruby",
- "tags": "builder,ruby",
+ "tags": "hidden,builder,ruby",
"supports": "ruby:2.0,ruby",
"version": "2.0",
"sampleRepo": "https://github.com/openshift/ruby-ex.git"
@@ -164,7 +164,7 @@
},
"from": {
"kind": "ImageStreamTag",
- "name": "5.20"
+ "name": "5.24"
}
},
{
@@ -173,7 +173,7 @@
"openshift.io/display-name": "Perl 5.16",
"description": "Build and run Perl 5.16 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-perl-container/blob/master/5.16/README.md.",
"iconClass": "icon-perl",
- "tags": "builder,perl",
+ "tags": "hidden,builder,perl",
"supports":"perl:5.16,perl",
"version": "5.16",
"sampleRepo": "https://github.com/openshift/dancer-ex.git"
@@ -198,7 +198,22 @@
"kind": "DockerImage",
"name": "centos/perl-520-centos7:latest"
}
-
+ },
+ {
+ "name": "5.24",
+ "annotations": {
+ "openshift.io/display-name": "Perl 5.24",
+ "description": "Build and run Perl 5.24 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-perl-container/blob/master/5.24/README.md.",
+ "iconClass": "icon-perl",
+ "tags": "builder,perl",
+ "supports":"perl:5.24,perl",
+ "version": "5.24",
+ "sampleRepo": "https://github.com/openshift/dancer-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/perl-524-centos7:latest"
+ }
}
]
}
@@ -226,7 +241,7 @@
},
"from": {
"kind": "ImageStreamTag",
- "name": "5.6"
+ "name": "7.0"
}
},
{
@@ -235,7 +250,7 @@
"openshift.io/display-name": "PHP 5.5",
"description": "Build and run PHP 5.5 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/5.5/README.md.",
"iconClass": "icon-php",
- "tags": "builder,php",
+ "tags": "hidden,builder,php",
"supports":"php:5.5,php",
"version": "5.5",
"sampleRepo": "https://github.com/openshift/cakephp-ex.git"
@@ -260,6 +275,22 @@
"kind": "DockerImage",
"name": "centos/php-56-centos7:latest"
}
+ },
+ {
+ "name": "7.0",
+ "annotations": {
+ "openshift.io/display-name": "PHP 7.0",
+ "description": "Build and run PHP 7.0 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/7.0/README.md.",
+ "iconClass": "icon-php",
+ "tags": "builder,php",
+ "supports":"php:7.0,php",
+ "version": "7.0",
+ "sampleRepo": "https://github.com/openshift/cakephp-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/php-70-centos7:latest"
+ }
}
]
}
@@ -296,7 +327,7 @@
"openshift.io/display-name": "Python 3.3",
"description": "Build and run Python 3.3 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.3/README.md.",
"iconClass": "icon-python",
- "tags": "builder,python",
+ "tags": "hidden,builder,python",
"supports":"python:3.3,python",
"version": "3.3",
"sampleRepo": "https://github.com/openshift/django-ex.git"
@@ -471,7 +502,7 @@
},
"from": {
"kind": "ImageStreamTag",
- "name": "5.6"
+ "name": "5.7"
}
},
{
@@ -480,7 +511,7 @@
"openshift.io/display-name": "MySQL 5.5",
"description": "Provides a MySQL 5.5 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mysql-container/tree/master/5.5/README.md.",
"iconClass": "icon-mysql-database",
- "tags": "mysql",
+ "tags": "hidden,mysql",
"version": "5.5"
},
"from": {
@@ -501,6 +532,20 @@
"kind": "DockerImage",
"name": "centos/mysql-56-centos7:latest"
}
+ },
+ {
+ "name": "5.7",
+ "annotations": {
+ "openshift.io/display-name": "MySQL 5.7",
+ "description": "Provides a MySQL 5.7 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mysql-container/tree/master/5.7/README.md.",
+ "iconClass": "icon-mysql-database",
+ "tags": "mysql",
+ "version": "5.7"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/mysql-57-centos7:latest"
+ }
}
]
}
@@ -576,7 +621,7 @@
"openshift.io/display-name": "PostgreSQL 9.2",
"description": "Provides a PostgreSQL 9.2 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.2.",
"iconClass": "icon-postgresql",
- "tags": "postgresql",
+ "tags": "hidden,postgresql",
"version": "9.2"
},
"from": {
@@ -645,7 +690,7 @@
"openshift.io/display-name": "MongoDB 2.4",
"description": "Provides a MongoDB 2.4 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/2.4/README.md.",
"iconClass": "icon-mongodb",
- "tags": "mongodb",
+ "tags": "hidden,mongodb",
"version": "2.4"
},
"from": {
@@ -688,6 +733,47 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
+ "name": "redis",
+ "annotations": {
+ "openshift.io/display-name": "Redis"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": "Redis (Latest)",
+ "description": "Provides a Redis database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/redis-container/tree/master/3.2/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Redis available on OpenShift, including major versions updates.",
+ "iconClass": "icon-redis",
+ "tags": "redis"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "3.2"
+ }
+ },
+ {
+ "name": "3.2",
+ "annotations": {
+ "openshift.io/display-name": "Redis 3.2",
+ "description": "Provides a Redis 3.2 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/redis-container/tree/master/3.2/README.md.",
+ "iconClass": "icon-redis",
+ "tags": "redis",
+ "version": "3.2"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/redis-32-centos7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
"name": "jenkins",
"annotations": {
"openshift.io/display-name": "Jenkins"
diff --git a/roles/openshift_examples/files/examples/v1.4/image-streams/image-streams-rhel7.json b/roles/openshift_examples/files/examples/v1.4/image-streams/image-streams-rhel7.json
index 88ee79a84..9b19b8bd0 100644
--- a/roles/openshift_examples/files/examples/v1.4/image-streams/image-streams-rhel7.json
+++ b/roles/openshift_examples/files/examples/v1.4/image-streams/image-streams-rhel7.json
@@ -35,7 +35,7 @@
"openshift.io/display-name": "Ruby 2.0",
"description": "Build and run Ruby 2.0 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-ruby-container/tree/master/2.0/README.md.",
"iconClass": "icon-ruby",
- "tags": "builder,ruby",
+ "tags": "hidden,builder,ruby",
"supports": "ruby:2.0,ruby",
"version": "2.0",
"sampleRepo": "https://github.com/openshift/ruby-ex.git"
@@ -164,7 +164,7 @@
},
"from": {
"kind": "ImageStreamTag",
- "name": "5.20"
+ "name": "5.24"
}
},
{
@@ -173,7 +173,7 @@
"openshift.io/display-name": "Perl 5.16",
"description": "Build and run Perl 5.16 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-perl-container/blob/master/5.16/README.md.",
"iconClass": "icon-perl",
- "tags": "builder,perl",
+ "tags": "hidden,builder,perl",
"supports":"perl:5.16,perl",
"version": "5.16",
"sampleRepo": "https://github.com/openshift/dancer-ex.git"
@@ -198,7 +198,22 @@
"kind": "DockerImage",
"name": "registry.access.redhat.com/rhscl/perl-520-rhel7:latest"
}
-
+ },
+ {
+ "name": "5.24",
+ "annotations": {
+ "openshift.io/display-name": "Perl 5.24",
+ "description": "Build and run Perl 5.24 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-perl-container/blob/master/5.24/README.md.",
+ "iconClass": "icon-perl",
+ "tags": "builder,perl",
+ "supports":"perl:5.24,perl",
+ "version": "5.24",
+ "sampleRepo": "https://github.com/openshift/dancer-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/perl-524-rhel7:latest"
+ }
}
]
}
@@ -235,7 +250,7 @@
"openshift.io/display-name": "PHP 5.5",
"description": "Build and run PHP 5.5 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/5.5/README.md.",
"iconClass": "icon-php",
- "tags": "builder,php",
+ "tags": "hidden,builder,php",
"supports":"php:5.5,php",
"version": "5.5",
"sampleRepo": "https://github.com/openshift/cakephp-ex.git"
@@ -260,6 +275,22 @@
"kind": "DockerImage",
"name": "registry.access.redhat.com/rhscl/php-56-rhel7:latest"
}
+ },
+ {
+ "name": "7.0",
+ "annotations": {
+ "openshift.io/display-name": "PHP 7.0",
+ "description": "Build and run PHP 7.0 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/7.0/README.md.",
+ "iconClass": "icon-php",
+ "tags": "builder,php",
+ "supports":"php:7.0,php",
+ "version": "7.0",
+ "sampleRepo": "https://github.com/openshift/cakephp-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/php-70-rhel7:latest"
+ }
}
]
}
@@ -296,7 +327,7 @@
"openshift.io/display-name": "Python 3.3",
"description": "Build and run Python 3.3 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.3/README.md.",
"iconClass": "icon-python",
- "tags": "builder,python",
+ "tags": "hidden,builder,python",
"supports":"python:3.3,python",
"version": "3.3",
"sampleRepo": "https://github.com/openshift/django-ex.git"
@@ -378,7 +409,7 @@
},
"from": {
"kind": "ImageStreamTag",
- "name": "5.6"
+ "name": "5.7"
}
},
{
@@ -387,7 +418,7 @@
"openshift.io/display-name": "MySQL 5.5",
"description": "Provides a MySQL 5.5 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mysql-container/tree/master/5.5/README.md.",
"iconClass": "icon-mysql-database",
- "tags": "mysql",
+ "tags": "hidden,mysql",
"version": "5.5"
},
"from": {
@@ -408,6 +439,20 @@
"kind": "DockerImage",
"name": "registry.access.redhat.com/rhscl/mysql-56-rhel7:latest"
}
+ },
+ {
+ "name": "5.7",
+ "annotations": {
+ "openshift.io/display-name": "MySQL 5.7",
+ "description": "Provides a MySQL 5.7 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mysql-container/tree/master/5.7/README.md.",
+ "iconClass": "icon-mysql-database",
+ "tags": "mysql",
+ "version": "5.7"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/mysql-57-rhel7:latest"
+ }
}
]
}
@@ -483,7 +528,7 @@
"openshift.io/display-name": "PostgreSQL 9.2",
"description": "Provides a PostgreSQL 9.2 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.2.",
"iconClass": "icon-postgresql",
- "tags": "postgresql",
+ "tags": "hidden,postgresql",
"version": "9.2"
},
"from": {
@@ -552,7 +597,7 @@
"openshift.io/display-name": "MongoDB 2.4",
"description": "Provides a MongoDB 2.4 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/2.4/README.md.",
"iconClass": "icon-mongodb",
- "tags": "mongodb",
+ "tags": "hidden,mongodb",
"version": "2.4"
},
"from": {
@@ -595,6 +640,47 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
+ "name": "redis",
+ "annotations": {
+ "openshift.io/display-name": "Redis"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": "Redis (Latest)",
+ "description": "Provides a Redis database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/redis-container/tree/master/3.2/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Redis available on OpenShift, including major versions updates.",
+ "iconClass": "icon-redis",
+ "tags": "redis"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "3.2"
+ }
+ },
+ {
+ "name": "3.2",
+ "annotations": {
+ "openshift.io/display-name": "Redis 3.2",
+ "description": "Provides a Redis 3.2 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/redis-container/tree/master/3.2/README.md.",
+ "iconClass": "icon-redis",
+ "tags": "redis",
+ "version": "3.2"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/redis-32-rhel7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
"name": "jenkins",
"annotations": {
"openshift.io/display-name": "Jenkins"
diff --git a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/README.md b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/README.md
index e0db922a2..62765e03d 100644
--- a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/README.md
+++ b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/README.md
@@ -19,5 +19,4 @@ instantiating them.
Note: This file is processed by `hack/update-external-examples.sh`. New examples
must follow the exact syntax of the existing entries. Files in this directory
-are automatically pulled down, do not add additional files directly to this
-directory.
+are automatically pulled down; do not modify or add files in this directory.
diff --git a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/apicast-gateway-template.yml b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/apicast-gateway-template.yml
new file mode 100644
index 000000000..34f5fcbcc
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/apicast-gateway-template.yml
@@ -0,0 +1,149 @@
+apiVersion: v1
+kind: Template
+metadata:
+ creationTimestamp: null
+ name: 3scale-gateway
+ annotations:
+ description: "3scale API Gateway"
+ iconClass: "icon-load-balancer"
+ tags: "api,gateway,3scale"
+objects:
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: ${THREESCALE_GATEWAY_NAME}
+ spec:
+ replicas: 2
+ selector:
+ deploymentconfig: ${THREESCALE_GATEWAY_NAME}
+ strategy:
+ resources: {}
+ rollingParams:
+ intervalSeconds: 1
+ maxSurge: 25%
+ maxUnavailable: 25%
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ deploymentconfig: ${THREESCALE_GATEWAY_NAME}
+ spec:
+ containers:
+ - env:
+ - name: THREESCALE_PORTAL_ENDPOINT
+ valueFrom:
+ secretKeyRef:
+ name: ${THREESCALE_PORTAL_ENDPOINT_SECRET}
+ key: password
+ - name: THREESCALE_CONFIG_FILE
+ value: ${THREESCALE_CONFIG_FILE}
+ - name: RESOLVER
+ value: ${RESOLVER}
+ - name: APICAST_SERVICES
+ value: ${APICAST_SERVICES}
+ - name: APICAST_MISSING_CONFIGURATION
+ value: ${MISSING_CONFIGURATION}
+ - name: APICAST_LOG_LEVEL
+ value: ${APICAST_LOG_LEVEL}
+ - name: APICAST_PATH_ROUTING_ENABLED
+ value: ${PATH_ROUTING}
+ - name: APICAST_RESPONSE_CODES
+ value: ${RESPONSE_CODES}
+ - name: APICAST_REQUEST_LOGS
+ value: ${REQUEST_LOGS}
+ - name: APICAST_RELOAD_CONFIG
+ value: ${APICAST_RELOAD_CONFIG}
+ image: ${THREESCALE_GATEWAY_IMAGE}
+ imagePullPolicy: Always
+ name: ${THREESCALE_GATEWAY_NAME}
+ livenessProbe:
+ httpGet:
+ path: /status/live
+ port: 8090
+ initialDelaySeconds: 10
+ timeoutSeconds: 1
+ readinessProbe:
+ httpGet:
+ path: /status/ready
+ port: 8090
+ initialDelaySeconds: 15
+ timeoutSeconds: 1
+ ports:
+ - containerPort: 8080
+ protocol: TCP
+ resources: {}
+ terminationMessagePath: /dev/termination-log
+ dnsPolicy: ClusterFirst
+ restartPolicy: Always
+ securityContext: {}
+ terminationGracePeriodSeconds: 30
+ triggers:
+ - type: ConfigChange
+ status: {}
+- apiVersion: v1
+ kind: Service
+ metadata:
+ creationTimestamp: null
+ name: ${THREESCALE_GATEWAY_NAME}
+ spec:
+ ports:
+ - name: 8080-tcp
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ selector:
+ deploymentconfig: ${THREESCALE_GATEWAY_NAME}
+ sessionAffinity: None
+ type: ClusterIP
+ status:
+ loadBalancer: {}
+parameters:
+- description: "Name of the secret containing the THREESCALE_PORTAL_ENDPOINT with the access-token or provider key"
+ value: threescale-portal-endpoint-secret
+ name: THREESCALE_PORTAL_ENDPOINT_SECRET
+ required: true
+- description: "Path to saved JSON file with configuration for the gateway. Has to be injected to the docker image as read only volume."
+ value:
+ name: THREESCALE_CONFIG_FILE
+ required: false
+- description: "Name for the 3scale API Gateway"
+ value: threescalegw
+ name: THREESCALE_GATEWAY_NAME
+ required: true
+- description: "Docker image to use."
+ value: 'rhamp10/apicast-gateway:1.0.0-4'
+ name: THREESCALE_GATEWAY_IMAGE
+ required: true
+- description: "DNS Resolver for openresty, if empty it will be autodiscovered"
+ value:
+ name: RESOLVER
+ required: false
+- description: "Subset of services to run. Use comma separated list of service ids (eg. 42,1337)"
+ value:
+ name: APICAST_SERVICES
+ required: false
+- description: "What to do on missing or invalid configuration. Allowed values are: log, exit."
+ value: exit
+ required: false
+ name: MISSING_CONFIGURATION
+- description: "Log level. One of the following: debug, info, notice, warn, error, crit, alert, or emerg."
+ name: APICAST_LOG_LEVEL
+ required: false
+- description: "Enable path routing. Experimental feature."
+ name: PATH_ROUTING
+ required: false
+ value: "false"
+- description: "Enable traffic logging to 3scale. Includes whole request and response."
+ value: "false"
+ name: REQUEST_LOGS
+ required: false
+- description: "Enable logging response codes to 3scale."
+ value: "false"
+ name: RESPONSE_CODES
+ required: false
+- description: "Reload config on every request"
+ value: "false"
+ name: APICAST_RELOAD_CONFIG
+ required: false
diff --git a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/cakephp-mysql.json b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/cakephp-mysql.json
index 354978891..9dbbf89d1 100644
--- a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/cakephp-mysql.json
+++ b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/cakephp-mysql.json
@@ -16,6 +16,17 @@
},
"objects": [
{
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "stringData" : {
+ "databaseUser" : "${DATABASE_USER}",
+ "databasePassword" : "${DATABASE_PASSWORD}"
+ }
+ },
+ {
"kind": "Service",
"apiVersion": "v1",
"metadata": {
@@ -212,11 +223,21 @@
},
{
"name": "DATABASE_USER",
- "value": "${DATABASE_USER}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databaseUser"
+ }
+ }
},
{
"name": "DATABASE_PASSWORD",
- "value": "${DATABASE_PASSWORD}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databasePassword"
+ }
+ }
},
{
"name": "CAKEPHP_SECRET_TOKEN",
@@ -349,12 +370,22 @@
},
"env": [
{
- "name": "MYSQL_USER",
- "value": "${DATABASE_USER}"
+ "name": "MYSQL_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databaseUser"
+ }
+ }
},
{
- "name": "MYSQL_PASSWORD",
- "value": "${DATABASE_PASSWORD}"
+ "name": "MYSQL_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databasePassword"
+ }
+ }
},
{
"name": "MYSQL_DATABASE",
diff --git a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/dancer-mysql.json b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/dancer-mysql.json
index 9fc5be5e0..dccb8bf7f 100644
--- a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/dancer-mysql.json
+++ b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/dancer-mysql.json
@@ -16,6 +16,17 @@
},
"objects": [
{
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "stringData" : {
+ "databaseUser" : "${DATABASE_USER}",
+ "databasePassword" : "${DATABASE_PASSWORD}"
+ }
+ },
+ {
"kind": "Service",
"apiVersion": "v1",
"metadata": {
@@ -115,7 +126,10 @@
"secret": "${GITHUB_WEBHOOK_SECRET}"
}
}
- ]
+ ],
+ "postCommit": {
+ "script": "perl -I extlib/lib/perl5 -I lib t/*"
+ }
}
},
{
@@ -190,11 +204,21 @@
},
{
"name": "MYSQL_USER",
- "value": "${DATABASE_USER}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databaseUser"
+ }
+ }
},
{
"name": "MYSQL_PASSWORD",
- "value": "${DATABASE_PASSWORD}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databasePassword"
+ }
+ }
},
{
"name": "MYSQL_DATABASE",
@@ -324,11 +348,21 @@
"env": [
{
"name": "MYSQL_USER",
- "value": "${DATABASE_USER}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databaseUser"
+ }
+ }
},
{
"name": "MYSQL_PASSWORD",
- "value": "${DATABASE_PASSWORD}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databasePassword"
+ }
+ }
},
{
"name": "MYSQL_DATABASE",
@@ -407,18 +441,6 @@
"from": "[a-zA-Z0-9]{40}"
},
{
- "name": "ADMIN_USERNAME",
- "displayName": "Administrator Username",
- "generate": "expression",
- "from": "admin[A-Z0-9]{3}"
- },
- {
- "name": "ADMIN_PASSWORD",
- "displayName": "Administrator Password",
- "generate": "expression",
- "from": "[a-zA-Z0-9]{8}"
- },
- {
"name": "DATABASE_SERVICE_NAME",
"displayName": "Database Service Name",
"required": true,
diff --git a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/django-postgresql.json b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/django-postgresql.json
index 590d5fd4f..59ff8a988 100644
--- a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/django-postgresql.json
+++ b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/django-postgresql.json
@@ -16,6 +16,17 @@
},
"objects": [
{
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "stringData" : {
+ "databaseUser" : "${DATABASE_USER}",
+ "databasePassword" : "${DATABASE_PASSWORD}"
+ }
+ },
+ {
"kind": "Service",
"apiVersion": "v1",
"metadata": {
@@ -204,11 +215,21 @@
},
{
"name": "DATABASE_USER",
- "value": "${DATABASE_USER}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databaseUser"
+ }
+ }
},
{
"name": "DATABASE_PASSWORD",
- "value": "${DATABASE_PASSWORD}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databasePassword"
+ }
+ }
},
{
"name": "APP_CONFIG",
@@ -314,11 +335,21 @@
"env": [
{
"name": "POSTGRESQL_USER",
- "value": "${DATABASE_USER}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databaseUser"
+ }
+ }
},
{
"name": "POSTGRESQL_PASSWORD",
- "value": "${DATABASE_PASSWORD}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databasePassword"
+ }
+ }
},
{
"name": "POSTGRESQL_DATABASE",
diff --git a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/jenkins-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/jenkins-ephemeral-template.json
index fc7423840..62ccc5b7f 100644
--- a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/jenkins-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/jenkins-ephemeral-template.json
@@ -89,7 +89,7 @@
},
"livenessProbe": {
"timeoutSeconds": 3,
- "initialDelaySeconds": 120,
+ "initialDelaySeconds": 420,
"failureThreshold" : 30,
"httpGet": {
"path": "/login",
diff --git a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/jenkins-persistent-template.json b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/jenkins-persistent-template.json
index acf59ee94..50c4ad566 100644
--- a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/jenkins-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/jenkins-persistent-template.json
@@ -106,7 +106,7 @@
},
"livenessProbe": {
"timeoutSeconds": 3,
- "initialDelaySeconds": 120,
+ "initialDelaySeconds": 420,
"failureThreshold" : 30,
"httpGet": {
"path": "/login",
diff --git a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/nodejs-mongodb.json b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/nodejs-mongodb.json
index d4b4add18..91f9ec7b3 100644
--- a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/nodejs-mongodb.json
+++ b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/nodejs-mongodb.json
@@ -16,6 +16,18 @@
},
"objects": [
{
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "stringData": {
+ "databaseUser": "${DATABASE_USER}",
+ "databasePassword": "${DATABASE_PASSWORD}",
+ "databaseAdminPassword" : "${DATABASE_ADMIN_PASSWORD}"
+ }
+ },
+ {
"kind": "Service",
"apiVersion": "v1",
"metadata": {
@@ -88,10 +100,10 @@
"namespace": "${NAMESPACE}",
"name": "nodejs:4"
},
- "env": [
+ "env": [
{
- "name": "NPM_MIRROR",
- "value": "${NPM_MIRROR}"
+ "name": "NPM_MIRROR",
+ "value": "${NPM_MIRROR}"
}
]
}
@@ -186,11 +198,21 @@
},
{
"name": "MONGODB_USER",
- "value": "${DATABASE_USER}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databaseUser"
+ }
+ }
},
{
"name": "MONGODB_PASSWORD",
- "value": "${DATABASE_PASSWORD}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databasePassword"
+ }
+ }
},
{
"name": "MONGODB_DATABASE",
@@ -198,7 +220,12 @@
},
{
"name": "MONGODB_ADMIN_PASSWORD",
- "value": "${DATABASE_ADMIN_PASSWORD}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databaseAdminPassword"
+ }
+ }
}
],
"readinessProbe": {
@@ -210,17 +237,17 @@
}
},
"livenessProbe": {
- "timeoutSeconds": 3,
- "initialDelaySeconds": 30,
- "httpGet": {
- "path": "/pagecount",
- "port": 8080
- }
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 30,
+ "httpGet": {
+ "path": "/pagecount",
+ "port": 8080
+ }
},
"resources": {
- "limits": {
- "memory": "${MEMORY_LIMIT}"
- }
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
}
}
]
@@ -306,11 +333,21 @@
"env": [
{
"name": "MONGODB_USER",
- "value": "${DATABASE_USER}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databaseUser"
+ }
+ }
},
{
"name": "MONGODB_PASSWORD",
- "value": "${DATABASE_PASSWORD}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databasePassword"
+ }
+ }
},
{
"name": "MONGODB_DATABASE",
@@ -318,14 +355,24 @@
},
{
"name": "MONGODB_ADMIN_PASSWORD",
- "value": "${DATABASE_ADMIN_PASSWORD}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databaseAdminPassword"
+ }
+ }
}
],
"readinessProbe": {
"timeoutSeconds": 1,
"initialDelaySeconds": 3,
"exec": {
- "command": [ "/bin/sh", "-i", "-c", "mongo 127.0.0.1:27017/$MONGODB_DATABASE -u $MONGODB_USER -p $MONGODB_PASSWORD --eval=\"quit()\""]
+ "command": [
+ "/bin/sh",
+ "-i",
+ "-c",
+ "mongo 127.0.0.1:27017/$MONGODB_DATABASE -u $MONGODB_USER -p $MONGODB_PASSWORD --eval=\"quit()\""
+ ]
}
},
"livenessProbe": {
@@ -336,9 +383,9 @@
}
},
"resources": {
- "limits": {
- "memory": "${MEMORY_MONGODB_LIMIT}"
- }
+ "limits": {
+ "memory": "${MEMORY_MONGODB_LIMIT}"
+ }
},
"volumeMounts": [
{
diff --git a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/rails-postgresql.json b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/rails-postgresql.json
index baed15d8a..6373562c4 100644
--- a/roles/openshift_examples/files/examples/v1.4/quickstart-templates/rails-postgresql.json
+++ b/roles/openshift_examples/files/examples/v1.4/quickstart-templates/rails-postgresql.json
@@ -16,6 +16,20 @@
},
"objects": [
{
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "stringData" : {
+ "databaseUser" : "${DATABASE_USER}",
+ "databasePassword" : "${DATABASE_PASSWORD}",
+ "applicationUser" : "${APPLICATION_USER}",
+ "applicationPassword" : "${APPLICATION_PASSWORD}",
+ "keyBase" : "${SECRET_KEY_BASE}"
+ }
+ },
+ {
"kind": "Service",
"apiVersion": "v1",
"metadata": {
@@ -207,11 +221,21 @@
},
{
"name": "POSTGRESQL_USER",
- "value": "${DATABASE_USER}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databaseUser"
+ }
+ }
},
{
"name": "POSTGRESQL_PASSWORD",
- "value": "${DATABASE_PASSWORD}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databasePassword"
+ }
+ }
},
{
"name": "POSTGRESQL_DATABASE",
@@ -219,7 +243,12 @@
},
{
"name": "SECRET_KEY_BASE",
- "value": "${SECRET_KEY_BASE}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "keyBase"
+ }
+ }
},
{
"name": "POSTGRESQL_MAX_CONNECTIONS",
@@ -235,11 +264,21 @@
},
{
"name": "APPLICATION_USER",
- "value": "${APPLICATION_USER}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "applicationUser"
+ }
+ }
},
{
"name": "APPLICATION_PASSWORD",
- "value": "${APPLICATION_PASSWORD}"
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "applicationPassword"
+ }
+ }
},
{
"name": "RAILS_ENV",
diff --git a/roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-pv-app-example.yaml b/roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-pv-app-example.yaml
new file mode 100644
index 000000000..14bdd1dca
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-pv-app-example.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: cloudforms
+spec:
+ capacity:
+ storage: 2Gi
+ accessModes:
+ - ReadWriteOnce
+ nfs:
+ path: /opt/nfs/volumes-app
+ server: 10.19.0.216
+ persistentVolumeReclaimPolicy: Recycle
diff --git a/roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-pv-example.yaml b/roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-pv-example.yaml
new file mode 100644
index 000000000..709d8d976
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-pv-example.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: nfs-pv01
+spec:
+ capacity:
+ storage: 2Gi
+ accessModes:
+ - ReadWriteOnce
+ nfs:
+ path: /opt/nfs/volumes
+ server: 10.19.0.216
+ persistentVolumeReclaimPolicy: Recycle
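
Both PV examples carry site-specific NFS values (server 10.19.0.216 and the /opt/nfs paths), so they are meant to be edited before use. A sketch of creating them and checking availability:

    $ oc create -f cfme-pv-example.yaml
    $ oc create -f cfme-pv-app-example.yaml
    $ oc get pv                        # both volumes should show STATUS Available
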
diff --git a/roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-template.yaml b/roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-template.yaml
new file mode 100644
index 000000000..c8e3d4083
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/cfme-templates/cfme-template.yaml
@@ -0,0 +1,479 @@
+apiVersion: v1
+kind: Template
+labels:
+ template: cloudforms
+metadata:
+ name: cloudforms
+ annotations:
+ description: "CloudForms appliance with persistent storage"
+ tags: "instant-app,cloudforms,cfme"
+ iconClass: "icon-rails"
+objects:
+- apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: "Exposes and load balances CloudForms pods"
+ service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"},{"name":"${MEMCACHED_SERVICE_NAME}","namespace":"","kind":"Service"}]'
+ name: ${NAME}
+ spec:
+ ports:
+ - name: http
+ port: 80
+ protocol: TCP
+ targetPort: 80
+ - name: https
+ port: 443
+ protocol: TCP
+ targetPort: 443
+ selector:
+ name: ${NAME}
+- apiVersion: v1
+ kind: Route
+ metadata:
+ name: ${NAME}
+ spec:
+ host: ${APPLICATION_DOMAIN}
+ port:
+ targetPort: https
+ tls:
+ termination: passthrough
+ to:
+ kind: Service
+ name: ${NAME}
+- apiVersion: v1
+ kind: ImageStream
+ metadata:
+ name: cfme-openshift-app
+ annotations:
+ description: "Keeps track of changes in the CloudForms app image"
+ spec:
+ dockerImageRepository: registry.access.redhat.com/cloudforms/cfme-openshift-app
+- apiVersion: v1
+ kind: PersistentVolumeClaim
+ metadata:
+ name: ${DATABASE_SERVICE_NAME}
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: ${DATABASE_VOLUME_CAPACITY}
+- apiVersion: v1
+ kind: PersistentVolumeClaim
+ metadata:
+ name: ${NAME}
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: ${APPLICATION_VOLUME_CAPACITY}
+- apiVersion: v1
+ kind: "DeploymentConfig"
+ metadata:
+ name: ${NAME}
+ annotations:
+ description: "Defines how to deploy the CloudForms appliance"
+ spec:
+ template:
+ metadata:
+ labels:
+ name: ${NAME}
+ name: ${NAME}
+ spec:
+ volumes:
+ -
+ name: "cfme-app-volume"
+ persistentVolumeClaim:
+ claimName: ${NAME}
+ containers:
+ - image: cloudforms/cfme-openshift-app:${APPLICATION_IMG_TAG}
+ imagePullPolicy: IfNotPresent
+ name: cloudforms
+ livenessProbe:
+ httpGet:
+ path: /
+ port: 80
+ initialDelaySeconds: 480
+ timeoutSeconds: 3
+ readinessProbe:
+ httpGet:
+ path: /
+ port: 80
+ initialDelaySeconds: 200
+ timeoutSeconds: 3
+ ports:
+ - containerPort: 80
+ protocol: TCP
+ - containerPort: 443
+ protocol: TCP
+ securityContext:
+ privileged: true
+ volumeMounts:
+ -
+ name: "cfme-app-volume"
+ mountPath: "/persistent"
+ env:
+ -
+ name: "APPLICATION_INIT_DELAY"
+ value: "${APPLICATION_INIT_DELAY}"
+ -
+ name: "DATABASE_SERVICE_NAME"
+ value: "${DATABASE_SERVICE_NAME}"
+ -
+ name: "DATABASE_REGION"
+ value: "${DATABASE_REGION}"
+ -
+ name: "MEMCACHED_SERVICE_NAME"
+ value: "${MEMCACHED_SERVICE_NAME}"
+ -
+ name: "POSTGRESQL_USER"
+ value: "${DATABASE_USER}"
+ -
+ name: "POSTGRESQL_PASSWORD"
+ value: "${DATABASE_PASSWORD}"
+ -
+ name: "POSTGRESQL_DATABASE"
+ value: "${DATABASE_NAME}"
+ -
+ name: "POSTGRESQL_MAX_CONNECTIONS"
+ value: "${POSTGRESQL_MAX_CONNECTIONS}"
+ -
+ name: "POSTGRESQL_SHARED_BUFFERS"
+ value: "${POSTGRESQL_SHARED_BUFFERS}"
+ resources:
+ requests:
+ memory: "${MEMORY_APPLICATION_MIN}"
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - /opt/rh/cfme-container-scripts/sync-pv-data
+ replicas: 1
+ selector:
+ name: ${NAME}
+ triggers:
+ - type: "ConfigChange"
+ - type: "ImageChange"
+ imageChangeParams:
+ automatic: false
+ containerNames:
+ - "cloudforms"
+ from:
+ kind: "ImageStreamTag"
+ name: "cfme-openshift-app:${APPLICATION_IMG_TAG}"
+ strategy:
+ type: "Recreate"
+ recreateParams:
+ timeoutSeconds: 1200
+- apiVersion: v1
+ kind: "Service"
+ metadata:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ annotations:
+ description: "Exposes the memcached server"
+ spec:
+ ports:
+ -
+ name: "memcached"
+ port: 11211
+ targetPort: 11211
+ selector:
+ name: "${MEMCACHED_SERVICE_NAME}"
+- apiVersion: v1
+ kind: ImageStream
+ metadata:
+ name: cfme-openshift-memcached
+ annotations:
+ description: "Keeps track of changes in the CloudForms memcached image"
+ spec:
+ dockerImageRepository: registry.access.redhat.com/cloudforms/cfme-openshift-memcached
+- apiVersion: v1
+ kind: "DeploymentConfig"
+ metadata:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ annotations:
+ description: "Defines how to deploy memcached"
+ spec:
+ strategy:
+ type: "Recreate"
+ triggers:
+ -
+ type: "ImageChange"
+ imageChangeParams:
+ automatic: false
+ containerNames:
+ - "memcached"
+ from:
+ kind: "ImageStreamTag"
+ name: "cfme-openshift-memcached:${MEMCACHED_IMG_TAG}"
+ -
+ type: "ConfigChange"
+ replicas: 1
+ selector:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ template:
+ metadata:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ labels:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ spec:
+ volumes: []
+ containers:
+ -
+ name: "memcached"
+ image: "cloudforms/cfme-openshift-memcached:${MEMCACHED_IMG_TAG}"
+ ports:
+ -
+ containerPort: 11211
+ readinessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 5
+ tcpSocket:
+ port: 11211
+ livenessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 30
+ tcpSocket:
+ port: 11211
+ volumeMounts: []
+ env:
+ -
+ name: "MEMCACHED_MAX_MEMORY"
+ value: "${MEMCACHED_MAX_MEMORY}"
+ -
+ name: "MEMCACHED_MAX_CONNECTIONS"
+ value: "${MEMCACHED_MAX_CONNECTIONS}"
+ -
+ name: "MEMCACHED_SLAB_PAGE_SIZE"
+ value: "${MEMCACHED_SLAB_PAGE_SIZE}"
+ resources:
+ limits:
+ memory: "${MEMORY_MEMCACHED_LIMIT}"
+- apiVersion: v1
+ kind: "Service"
+ metadata:
+ name: "${DATABASE_SERVICE_NAME}"
+ annotations:
+ description: "Exposes the database server"
+ spec:
+ ports:
+ -
+ name: "postgresql"
+ port: 5432
+ targetPort: 5432
+ selector:
+ name: "${DATABASE_SERVICE_NAME}"
+- apiVersion: v1
+ kind: ImageStream
+ metadata:
+ name: cfme-openshift-postgresql
+ annotations:
+ description: "Keeps track of changes in the CloudForms postgresql image"
+ spec:
+ dockerImageRepository: registry.access.redhat.com/cloudforms/cfme-openshift-postgresql
+- apiVersion: v1
+ kind: "DeploymentConfig"
+ metadata:
+ name: "${DATABASE_SERVICE_NAME}"
+ annotations:
+ description: "Defines how to deploy the database"
+ spec:
+ strategy:
+ type: "Recreate"
+ triggers:
+ -
+ type: "ImageChange"
+ imageChangeParams:
+ automatic: false
+ containerNames:
+ - "postgresql"
+ from:
+ kind: "ImageStreamTag"
+ name: "cfme-openshift-postgresql:${POSTGRESQL_IMG_TAG}"
+ -
+ type: "ConfigChange"
+ replicas: 1
+ selector:
+ name: "${DATABASE_SERVICE_NAME}"
+ template:
+ metadata:
+ name: "${DATABASE_SERVICE_NAME}"
+ labels:
+ name: "${DATABASE_SERVICE_NAME}"
+ spec:
+ volumes:
+ -
+ name: "cfme-pgdb-volume"
+ persistentVolumeClaim:
+ claimName: ${DATABASE_SERVICE_NAME}
+ containers:
+ -
+ name: "postgresql"
+ image: "cloudforms/cfme-openshift-postgresql:${POSTGRESQL_IMG_TAG}"
+ ports:
+ -
+ containerPort: 5432
+ readinessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 15
+ exec:
+ command:
+ - "/bin/sh"
+ - "-i"
+ - "-c"
+ - "psql -h 127.0.0.1 -U ${POSTGRESQL_USER} -q -d ${POSTGRESQL_DATABASE} -c 'SELECT 1'"
+ livenessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 60
+ tcpSocket:
+ port: 5432
+ volumeMounts:
+ -
+ name: "cfme-pgdb-volume"
+ mountPath: "/var/lib/pgsql/data"
+ env:
+ -
+ name: "POSTGRESQL_USER"
+ value: "${DATABASE_USER}"
+ -
+ name: "POSTGRESQL_PASSWORD"
+ value: "${DATABASE_PASSWORD}"
+ -
+ name: "POSTGRESQL_DATABASE"
+ value: "${DATABASE_NAME}"
+ -
+ name: "POSTGRESQL_MAX_CONNECTIONS"
+ value: "${POSTGRESQL_MAX_CONNECTIONS}"
+ -
+ name: "POSTGRESQL_SHARED_BUFFERS"
+ value: "${POSTGRESQL_SHARED_BUFFERS}"
+ resources:
+ limits:
+ memory: "${MEMORY_POSTGRESQL_LIMIT}"
+
+parameters:
+ -
+ name: "NAME"
+ displayName: Name
+ required: true
+ description: "The name assigned to all of the frontend objects defined in this template."
+ value: cloudforms
+ -
+ name: "DATABASE_SERVICE_NAME"
+ displayName: "PostgreSQL Service Name"
+ required: true
+ description: "The name of the OpenShift Service exposed for the PostgreSQL container."
+ value: "postgresql"
+ -
+ name: "DATABASE_USER"
+ displayName: "PostgreSQL User"
+ required: true
+ description: "PostgreSQL user that will access the database."
+ value: "root"
+ -
+ name: "DATABASE_PASSWORD"
+ displayName: "PostgreSQL Password"
+ required: true
+ description: "Password for the PostgreSQL user."
+ value: "smartvm"
+ -
+ name: "DATABASE_NAME"
+ required: true
+ displayName: "PostgreSQL Database Name"
+ description: "Name of the PostgreSQL database accessed."
+ value: "vmdb_production"
+ -
+ name: "DATABASE_REGION"
+ required: true
+ displayName: "Application Database Region"
+ description: "Database region that will be used for application."
+ value: "0"
+ -
+ name: "MEMCACHED_SERVICE_NAME"
+ required: true
+ displayName: "Memcached Service Name"
+ description: "The name of the OpenShift Service exposed for the Memcached container."
+ value: "memcached"
+ -
+ name: "MEMCACHED_MAX_MEMORY"
+ displayName: "Memcached Max Memory"
+ description: "Memcached maximum memory for memcached object storage in MB."
+ value: "64"
+ -
+ name: "MEMCACHED_MAX_CONNECTIONS"
+ displayName: "Memcached Max Connections"
+ description: "Memcached maximum number of connections allowed."
+ value: "1024"
+ -
+ name: "MEMCACHED_SLAB_PAGE_SIZE"
+ displayName: "Memcached Slab Page Size"
+ description: "Memcached size of each slab page."
+ value: "1m"
+ -
+ name: "POSTGRESQL_MAX_CONNECTIONS"
+ displayName: "PostgreSQL Max Connections"
+ description: "PostgreSQL maximum number of database connections allowed."
+ value: "100"
+ -
+ name: "POSTGRESQL_SHARED_BUFFERS"
+ displayName: "PostgreSQL Shared Buffer Amount"
+ description: "Amount of memory dedicated for PostgreSQL shared memory buffers."
+ value: "64MB"
+ -
+ name: "MEMORY_APPLICATION_MIN"
+ displayName: "Application Memory Minimum"
+ required: true
+ description: "Minimum amount of memory the Application container will need."
+ value: "4096Mi"
+ -
+ name: "MEMORY_POSTGRESQL_LIMIT"
+ displayName: "PostgreSQL Memory Limit"
+ required: true
+ description: "Maximum amount of memory the PostgreSQL container can use."
+ value: "2048Mi"
+ -
+ name: "MEMORY_MEMCACHED_LIMIT"
+ displayName: "Memcached Memory Limit"
+ required: true
+ description: "Maximum amount of memory the Memcached container can use."
+ value: "256Mi"
+ -
+ name: "POSTGRESQL_IMG_TAG"
+ displayName: "PostgreSQL Image Tag"
+ description: "This is the PostgreSQL image tag/version requested to deploy."
+ value: "latest"
+ -
+ name: "MEMCACHED_IMG_TAG"
+ displayName: "Memcached Image Tag"
+ description: "This is the Memcached image tag/version requested to deploy."
+ value: "latest"
+ -
+ name: "APPLICATION_IMG_TAG"
+ displayName: "Application Image Tag"
+ description: "This is the Application image tag/version requested to deploy."
+ value: "latest"
+ -
+ name: "APPLICATION_DOMAIN"
+ displayName: "Application Hostname"
+ description: "The exposed hostname that will route to the application service, if left blank a value will be defaulted."
+ value: ""
+ -
+ name: "APPLICATION_INIT_DELAY"
+ displayName: "Application Init Delay"
+ required: true
+ description: "Delay in seconds before we attempt to initialize the application."
+ value: "30"
+ -
+ name: "APPLICATION_VOLUME_CAPACITY"
+ displayName: "Application Volume Capacity"
+ required: true
+ description: "Volume space available for application data."
+ value: "1Gi"
+ -
+ name: "DATABASE_VOLUME_CAPACITY"
+ displayName: "Database Volume Capacity"
+ required: true
+    description: "Volume space available for the database."
+ value: "1Gi"
diff --git a/roles/openshift_examples/files/examples/v1.5/db-templates/README.md b/roles/openshift_examples/files/examples/v1.5/db-templates/README.md
new file mode 100644
index 000000000..a36d7ba7d
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/db-templates/README.md
@@ -0,0 +1,76 @@
+OpenShift 3 Database Examples
+=============================
+
+This directory contains example JSON templates to deploy databases in OpenShift.
+They can be used to immediately instantiate a database and expose it as a
+service in the current project, or to add a template that can be used later from
+the Web Console or the CLI.
+
+The examples can also be tweaked to create new templates.
+
+
+## Ephemeral vs. Persistent
+
+For each supported database, there are two template files.
+
+Files named `*-ephemeral-template.json` use
+"[emptyDir](https://docs.openshift.org/latest/dev_guide/volumes.html)" volumes
+for data storage, which means that data is lost after a pod restart.
+This is tolerable for experimenting, but not suitable for production use.
+
+The other templates, named `*-persistent-template.json`, use [persistent volume
+claims](https://docs.openshift.org/latest/architecture/additional_concepts/storage.html#persistent-volume-claims)
+to request persistent storage provided by [persistent
+volumes](https://docs.openshift.org/latest/architecture/additional_concepts/storage.html#persistent-volumes),
+which must have been created upfront.
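+
+Before instantiating a persistent template, a cluster administrator can check
+whether any persistent volumes are available to satisfy the claim, for example
+(assuming the `oc` client is logged in with sufficient privileges):
+
+    $ oc get pv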
+
+
+## Usage
+
+### Instantiating a new database service
+
+Use these instructions if you want to quickly deploy a new database service in
+your current project. Instantiate a new database service with this command:
+
+ $ oc new-app /path/to/template.json
+
+Replace `/path/to/template.json` with an appropriate path, which can be either
+a local path or a URL. Example:
+
+ $ oc new-app https://raw.githubusercontent.com/openshift/origin/master/examples/db-templates/mongodb-ephemeral-template.json
+
+The parameters listed in the output of the `oc new-app` command can be
+overridden by specifying values on the command line with the `-p` option:
+
+ $ oc new-app examples/db-templates/mongodb-ephemeral-template.json -p DATABASE_SERVICE_NAME=mydb -p MONGODB_USER=default
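+
+To see which parameters a template accepts (and their default values) before
+instantiating it, the template can be processed locally, for example:
+
+    $ oc process --parameters -f examples/db-templates/mongodb-ephemeral-template.json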
+
+Note that a persistent template requires an existing persistent volume;
+otherwise, the deployment will never succeed.
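+
+To verify that the claim created by a persistent template was satisfied, its
+status can be inspected, e.g. assuming the template was instantiated with
+`DATABASE_SERVICE_NAME=mydb`:
+
+    $ oc get pvc mydb
+
+The database pod will only start successfully once the claim reports a `Bound`
+status.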
+
+
+### Adding a database as a template
+
+Use these instructions if, instead of instantiating a service right away, you
+want to load the template into an OpenShift project so that it can be used
+later. Create the template with this command:
+
+ $ oc create -f /path/to/template.json
+
+Replace `/path/to/template.json` with an appropriate path, which can be either
+a local path or a URL. Example:
+
+ $ oc create -f https://raw.githubusercontent.com/openshift/origin/master/examples/db-templates/mongodb-ephemeral-template.json
+ template "mongodb-ephemeral" created
+
+The new template is now available to use in the Web Console or with `oc
+new-app`.
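+
+For example, a stored template can then be instantiated by name with something
+like:
+
+    $ oc new-app --template=mongodb-ephemeral -p DATABASE_SERVICE_NAME=mydb
+
+(The template name `mongodb-ephemeral` comes from the `metadata.name` field of
+the template created above.)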
+
+
+## More information
+
+The usage of each supported database image is further documented in the links
+below:
+
+- [MySQL](https://docs.openshift.org/latest/using_images/db_images/mysql.html)
+- [PostgreSQL](https://docs.openshift.org/latest/using_images/db_images/postgresql.html)
+- [MongoDB](https://docs.openshift.org/latest/using_images/db_images/mongodb.html)
diff --git a/roles/openshift_examples/files/examples/v1.5/db-templates/mariadb-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.5/db-templates/mariadb-ephemeral-template.json
new file mode 100644
index 000000000..cfbfc3e20
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/db-templates/mariadb-ephemeral-template.json
@@ -0,0 +1,225 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "mariadb-ephemeral",
+ "annotations": {
+ "openshift.io/display-name": "MariaDB (Ephemeral)",
+      "description": "MariaDB database service, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing.",
+ "iconClass": "icon-mariadb",
+ "tags": "database,mariadb"
+ }
+ },
+ "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MYSQL_USER}\n Password: ${MYSQL_PASSWORD}\n Database Name: ${MYSQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:3306/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md.",
+ "labels": {
+    "template": "mariadb-ephemeral-template"
+ },
+ "objects": [
+ {
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "stringData" : {
+ "database-user" : "${MYSQL_USER}",
+ "database-password" : "${MYSQL_PASSWORD}",
+ "database-root-password" : "${MYSQL_ROOT_PASSWORD}"
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "mariadb",
+ "port": 3306
+ }
+ ],
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "mariadb"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "mariadb:10.1",
+ "namespace": "${NAMESPACE}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "labels": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "mariadb",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 3306
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 5,
+ "exec": {
+ "command": [ "/bin/sh", "-i", "-c",
+ "MYSQL_PWD=\"$MYSQL_PASSWORD\" mysql -h 127.0.0.1 -u $MYSQL_USER -D $MYSQL_DATABASE -e 'SELECT 1'"]
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 30,
+ "tcpSocket": {
+ "port": 3306
+ }
+ },
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-user"
+ }
+ }
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-password"
+ }
+ }
+ },
+ {
+ "name": "MYSQL_ROOT_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-root-password"
+ }
+ }
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${MYSQL_DATABASE}"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "mountPath": "/var/lib/mysql/data"
+ }
+ ],
+ "imagePullPolicy": "IfNotPresent"
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "emptyDir": {
+ "medium": ""
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ],
+ "parameters": [
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "description": "Maximum amount of memory the container can use.",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Namespace",
+ "description": "The OpenShift Namespace where the ImageStream resides.",
+ "value": "openshift"
+ },
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "displayName": "Database Service Name",
+ "description": "The name of the OpenShift Service exposed for the database.",
+ "value": "mariadb",
+ "required": true
+ },
+ {
+ "name": "MYSQL_USER",
+ "displayName": "MariaDB Connection Username",
+ "description": "Username for MariaDB user that will be used for accessing the database.",
+ "generate": "expression",
+ "from": "user[A-Z0-9]{3}",
+ "required": true
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "displayName": "MariaDB Connection Password",
+ "description": "Password for the MariaDB connection user.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}",
+ "required": true
+ },
+ {
+ "name": "MYSQL_ROOT_PASSWORD",
+ "displayName": "MariaDB root Password",
+ "description": "Password for the MariaDB root user.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}",
+ "required": true
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "displayName": "MariaDB Database Name",
+ "description": "Name of the MariaDB database accessed.",
+ "value": "sampledb",
+ "required": true
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/db-templates/mariadb-persistent-template.json b/roles/openshift_examples/files/examples/v1.5/db-templates/mariadb-persistent-template.json
new file mode 100644
index 000000000..e933eecf0
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/db-templates/mariadb-persistent-template.json
@@ -0,0 +1,249 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "mariadb-persistent",
+ "annotations": {
+ "openshift.io/display-name": "MariaDB (Persistent)",
+ "description": "MariaDB database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
+ "iconClass": "icon-mariadb",
+ "tags": "database,mariadb"
+ }
+ },
+ "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MYSQL_USER}\n Password: ${MYSQL_PASSWORD}\n Database Name: ${MYSQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:3306/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md.",
+ "labels": {
+ "template": "mariadb-persistent-template"
+ },
+ "objects": [
+ {
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "stringData" : {
+ "database-user" : "${MYSQL_USER}",
+ "database-password" : "${MYSQL_PASSWORD}",
+ "database-root-password" : "${MYSQL_ROOT_PASSWORD}"
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "mariadb",
+ "port": 3306
+ }
+ ],
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "PersistentVolumeClaim",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "mariadb"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "mariadb:10.1",
+ "namespace": "${NAMESPACE}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "labels": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "mariadb",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 3306
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 5,
+ "exec": {
+ "command": [ "/bin/sh", "-i", "-c",
+ "MYSQL_PWD=\"$MYSQL_PASSWORD\" mysql -h 127.0.0.1 -u $MYSQL_USER -D $MYSQL_DATABASE -e 'SELECT 1'"]
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 30,
+ "tcpSocket": {
+ "port": 3306
+ }
+ },
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-user"
+ }
+ }
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-password"
+ }
+ }
+ },
+ {
+ "name": "MYSQL_ROOT_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-root-password"
+ }
+ }
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${MYSQL_DATABASE}"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "mountPath": "/var/lib/mysql/data"
+ }
+ ],
+ "imagePullPolicy": "IfNotPresent"
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "persistentVolumeClaim": {
+ "claimName": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ],
+ "parameters": [
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "description": "Maximum amount of memory the container can use.",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Namespace",
+ "description": "The OpenShift Namespace where the ImageStream resides.",
+ "value": "openshift"
+ },
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "displayName": "Database Service Name",
+ "description": "The name of the OpenShift Service exposed for the database.",
+ "value": "mariadb",
+ "required": true
+ },
+ {
+ "name": "MYSQL_USER",
+ "displayName": "MariaDB Connection Username",
+ "description": "Username for MariaDB user that will be used for accessing the database.",
+ "generate": "expression",
+ "from": "user[A-Z0-9]{3}",
+ "required": true
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "displayName": "MariaDB Connection Password",
+ "description": "Password for the MariaDB connection user.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}",
+ "required": true
+ },
+ {
+ "name": "MYSQL_ROOT_PASSWORD",
+ "displayName": "MariaDB root Password",
+ "description": "Password for the MariaDB root user.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}",
+ "required": true
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "displayName": "MariaDB Database Name",
+ "description": "Name of the MariaDB database accessed.",
+ "value": "sampledb",
+ "required": true
+ },
+ {
+ "name": "VOLUME_CAPACITY",
+ "displayName": "Volume Capacity",
+ "description": "Volume space available for data, e.g. 512Mi, 2Gi.",
+ "value": "1Gi",
+ "required": true
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/db-templates/mongodb-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.5/db-templates/mongodb-ephemeral-template.json
new file mode 100644
index 000000000..8b8fcb58b
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/db-templates/mongodb-ephemeral-template.json
@@ -0,0 +1,253 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "mongodb-ephemeral",
+ "creationTimestamp": null,
+ "annotations": {
+ "openshift.io/display-name": "MongoDB (Ephemeral)",
+      "description": "MongoDB database service, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/blob/master/3.2/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing.",
+ "iconClass": "icon-mongodb",
+ "tags": "database,mongodb"
+ }
+ },
+ "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MONGODB_USER}\n Password: ${MONGODB_PASSWORD}\n Database Name: ${MONGODB_DATABASE}\n Connection URL: mongodb://${MONGODB_USER}:${MONGODB_PASSWORD}@${DATABASE_SERVICE_NAME}/${MONGODB_DATABASE}\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/blob/master/3.2/README.md.",
+ "labels": {
+ "template": "mongodb-ephemeral-template"
+ },
+ "objects": [
+ {
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "stringData" : {
+ "database-user" : "${MONGODB_USER}",
+ "database-password" : "${MONGODB_PASSWORD}",
+ "database-admin-password" : "${MONGODB_ADMIN_PASSWORD}"
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "mongo",
+ "protocol": "TCP",
+ "port": 27017,
+ "targetPort": 27017,
+ "nodePort": 0
+ }
+ ],
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "type": "ClusterIP",
+ "sessionAffinity": "None"
+ },
+ "status": {
+ "loadBalancer": {}
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "mongodb"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "mongodb:${MONGODB_VERSION}",
+ "namespace": "${NAMESPACE}"
+ },
+ "lastTriggeredImage": ""
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "mongodb",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 27017,
+ "protocol": "TCP"
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 3,
+ "exec": {
+ "command": [ "/bin/sh", "-i", "-c", "mongo 127.0.0.1:27017/$MONGODB_DATABASE -u $MONGODB_USER -p $MONGODB_PASSWORD --eval=\"quit()\""]
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 30,
+ "tcpSocket": {
+ "port": 27017
+ }
+ },
+ "env": [
+ {
+ "name": "MONGODB_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-user"
+ }
+ }
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-password"
+ }
+ }
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-admin-password"
+ }
+ }
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "value": "${MONGODB_DATABASE}"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "mountPath": "/var/lib/mongodb/data"
+ }
+ ],
+ "terminationMessagePath": "/dev/termination-log",
+ "imagePullPolicy": "IfNotPresent",
+ "capabilities": {},
+ "securityContext": {
+ "capabilities": {},
+ "privileged": false
+ }
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "emptyDir": {
+ "medium": ""
+ }
+ }
+ ],
+ "restartPolicy": "Always",
+ "dnsPolicy": "ClusterFirst"
+ }
+ }
+ },
+ "status": {}
+ }
+ ],
+ "parameters": [
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "description": "Maximum amount of memory the container can use.",
+ "value": "512Mi"
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Namespace",
+ "description": "The OpenShift Namespace where the ImageStream resides.",
+ "value": "openshift"
+ },
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "displayName": "Database Service Name",
+ "description": "The name of the OpenShift Service exposed for the database.",
+ "value": "mongodb",
+ "required": true
+ },
+ {
+ "name": "MONGODB_USER",
+ "displayName": "MongoDB Connection Username",
+ "description": "Username for MongoDB user that will be used for accessing the database.",
+ "generate": "expression",
+ "from": "user[A-Z0-9]{3}",
+ "required": true
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "displayName": "MongoDB Connection Password",
+ "description": "Password for the MongoDB connection user.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}",
+ "required": true
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "displayName": "MongoDB Database Name",
+ "description": "Name of the MongoDB database accessed.",
+ "value": "sampledb",
+ "required": true
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "displayName": "MongoDB Admin Password",
+ "description": "Password for the database admin user.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}",
+ "required": true
+ },
+ {
+ "name": "MONGODB_VERSION",
+ "displayName": "Version of MongoDB Image",
+ "description": "Version of MongoDB image to be used (2.4, 2.6, 3.2 or latest).",
+ "value": "3.2",
+ "required": true
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/db-templates/mongodb-persistent-template.json b/roles/openshift_examples/files/examples/v1.5/db-templates/mongodb-persistent-template.json
new file mode 100644
index 000000000..72d3a8556
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/db-templates/mongodb-persistent-template.json
@@ -0,0 +1,277 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "mongodb-persistent",
+ "creationTimestamp": null,
+ "annotations": {
+ "openshift.io/display-name": "MongoDB (Persistent)",
+ "description": "MongoDB database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/blob/master/3.2/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
+ "iconClass": "icon-mongodb",
+ "tags": "database,mongodb"
+ }
+ },
+ "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MONGODB_USER}\n Password: ${MONGODB_PASSWORD}\n Database Name: ${MONGODB_DATABASE}\n Connection URL: mongodb://${MONGODB_USER}:${MONGODB_PASSWORD}@${DATABASE_SERVICE_NAME}/${MONGODB_DATABASE}\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/blob/master/3.2/README.md.",
+ "labels": {
+ "template": "mongodb-persistent-template"
+ },
+ "objects": [
+ {
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "stringData" : {
+ "database-user" : "${MONGODB_USER}",
+ "database-password" : "${MONGODB_PASSWORD}",
+ "database-admin-password" : "${MONGODB_ADMIN_PASSWORD}"
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "mongo",
+ "protocol": "TCP",
+ "port": 27017,
+ "targetPort": 27017,
+ "nodePort": 0
+ }
+ ],
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "type": "ClusterIP",
+ "sessionAffinity": "None"
+ },
+ "status": {
+ "loadBalancer": {}
+ }
+ },
+ {
+ "kind": "PersistentVolumeClaim",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "mongodb"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "mongodb:${MONGODB_VERSION}",
+ "namespace": "${NAMESPACE}"
+ },
+ "lastTriggeredImage": ""
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "mongodb",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 27017,
+ "protocol": "TCP"
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 3,
+ "exec": {
+ "command": [ "/bin/sh", "-i", "-c", "mongo 127.0.0.1:27017/$MONGODB_DATABASE -u $MONGODB_USER -p $MONGODB_PASSWORD --eval=\"quit()\""]
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 30,
+ "tcpSocket": {
+ "port": 27017
+ }
+ },
+ "env": [
+ {
+ "name": "MONGODB_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-user"
+ }
+ }
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-password"
+ }
+ }
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-admin-password"
+ }
+ }
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "value": "${MONGODB_DATABASE}"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "mountPath": "/var/lib/mongodb/data"
+ }
+ ],
+ "terminationMessagePath": "/dev/termination-log",
+ "imagePullPolicy": "IfNotPresent",
+ "capabilities": {},
+ "securityContext": {
+ "capabilities": {},
+ "privileged": false
+ }
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "persistentVolumeClaim": {
+ "claimName": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ ],
+ "restartPolicy": "Always",
+ "dnsPolicy": "ClusterFirst"
+ }
+ }
+ },
+ "status": {}
+ }
+ ],
+ "parameters": [
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "description": "Maximum amount of memory the container can use.",
+ "value": "512Mi"
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Namespace",
+ "description": "The OpenShift Namespace where the ImageStream resides.",
+ "value": "openshift"
+ },
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "displayName": "Database Service Name",
+ "description": "The name of the OpenShift Service exposed for the database.",
+ "value": "mongodb",
+ "required": true
+ },
+ {
+ "name": "MONGODB_USER",
+ "displayName": "MongoDB Connection Username",
+ "description": "Username for MongoDB user that will be used for accessing the database.",
+ "generate": "expression",
+ "from": "user[A-Z0-9]{3}",
+ "required": true
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "displayName": "MongoDB Connection Password",
+ "description": "Password for the MongoDB connection user.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}",
+ "required": true
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "displayName": "MongoDB Database Name",
+ "description": "Name of the MongoDB database accessed.",
+ "value": "sampledb",
+ "required": true
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "displayName": "MongoDB Admin Password",
+ "description": "Password for the database admin user.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}",
+ "required": true
+ },
+ {
+ "name": "VOLUME_CAPACITY",
+ "displayName": "Volume Capacity",
+ "description": "Volume space available for data, e.g. 512Mi, 2Gi.",
+ "value": "1Gi",
+ "required": true
+ },
+ {
+ "name": "MONGODB_VERSION",
+ "displayName": "Version of MongoDB Image",
+ "description": "Version of MongoDB image to be used (2.4, 2.6, 3.2 or latest).",
+ "value": "3.2",
+ "required": true
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/db-templates/mysql-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.5/db-templates/mysql-ephemeral-template.json
new file mode 100644
index 000000000..34dd2ed78
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/db-templates/mysql-ephemeral-template.json
@@ -0,0 +1,253 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "mysql-ephemeral",
+ "annotations": {
+ "openshift.io/display-name": "MySQL (Ephemeral)",
+      "description": "MySQL database service, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.7/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing.",
+ "iconClass": "icon-mysql-database",
+ "tags": "database,mysql"
+ }
+ },
+ "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MYSQL_USER}\n Password: ${MYSQL_PASSWORD}\n Database Name: ${MYSQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:3306/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.7/README.md.",
+ "labels": {
+ "template": "mysql-ephemeral-template"
+ },
+ "objects": [
+ {
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "stringData" : {
+ "database-user" : "${MYSQL_USER}",
+ "database-password" : "${MYSQL_PASSWORD}",
+ "database-root-password" : "${MYSQL_ROOT_PASSWORD}"
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "mysql",
+ "protocol": "TCP",
+ "port": 3306,
+ "targetPort": 3306,
+ "nodePort": 0
+ }
+ ],
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "type": "ClusterIP",
+ "sessionAffinity": "None"
+ },
+ "status": {
+ "loadBalancer": {}
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "mysql:${MYSQL_VERSION}",
+ "namespace": "${NAMESPACE}"
+ },
+ "lastTriggeredImage": ""
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "mysql",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 5,
+ "exec": {
+ "command": [ "/bin/sh", "-i", "-c",
+ "MYSQL_PWD=\"$MYSQL_PASSWORD\" mysql -h 127.0.0.1 -u $MYSQL_USER -D $MYSQL_DATABASE -e 'SELECT 1'"]
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 30,
+ "tcpSocket": {
+ "port": 3306
+ }
+ },
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-user"
+ }
+ }
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-password"
+ }
+ }
+ },
+ {
+ "name": "MYSQL_ROOT_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-root-password"
+ }
+ }
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${MYSQL_DATABASE}"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "mountPath": "/var/lib/mysql/data"
+ }
+ ],
+ "terminationMessagePath": "/dev/termination-log",
+ "imagePullPolicy": "IfNotPresent",
+ "capabilities": {},
+ "securityContext": {
+ "capabilities": {},
+ "privileged": false
+ }
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "emptyDir": {
+ "medium": ""
+ }
+ }
+ ],
+ "restartPolicy": "Always",
+ "dnsPolicy": "ClusterFirst"
+ }
+ }
+ },
+ "status": {}
+ }
+ ],
+ "parameters": [
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "description": "Maximum amount of memory the container can use.",
+ "value": "512Mi"
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Namespace",
+ "description": "The OpenShift Namespace where the ImageStream resides.",
+ "value": "openshift"
+ },
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "displayName": "Database Service Name",
+ "description": "The name of the OpenShift Service exposed for the database.",
+ "value": "mysql",
+ "required": true
+ },
+ {
+ "name": "MYSQL_USER",
+ "displayName": "MySQL Connection Username",
+ "description": "Username for MySQL user that will be used for accessing the database.",
+ "generate": "expression",
+ "from": "user[A-Z0-9]{3}",
+ "required": true
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "displayName": "MySQL Connection Password",
+ "description": "Password for the MySQL connection user.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}",
+ "required": true
+ },
+ {
+ "name": "MYSQL_ROOT_PASSWORD",
+ "displayName": "MySQL root user Password",
+ "description": "Password for the MySQL root user.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}",
+ "required": true
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "displayName": "MySQL Database Name",
+ "description": "Name of the MySQL database accessed.",
+ "value": "sampledb",
+ "required": true
+ },
+ {
+ "name": "MYSQL_VERSION",
+ "displayName": "Version of MySQL Image",
+ "description": "Version of MySQL image to be used (5.5, 5.6, 5.7, or latest).",
+ "value": "5.7",
+ "required": true
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/db-templates/mysql-persistent-template.json b/roles/openshift_examples/files/examples/v1.5/db-templates/mysql-persistent-template.json
new file mode 100644
index 000000000..85c48da01
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/db-templates/mysql-persistent-template.json
@@ -0,0 +1,256 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "mysql-persistent",
+ "annotations": {
+ "openshift.io/display-name": "MySQL (Persistent)",
+ "description": "MySQL database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.7/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
+ "iconClass": "icon-mysql-database",
+ "tags": "database,mysql"
+ }
+ },
+ "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MYSQL_USER}\n Password: ${MYSQL_PASSWORD}\n Database Name: ${MYSQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:3306/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.7/README.md.",
+ "labels": {
+ "template": "mysql-persistent-template"
+ },
+ "objects": [
+ {
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "stringData" : {
+ "database-user" : "${MYSQL_USER}",
+ "database-password" : "${MYSQL_PASSWORD}",
+ "database-root-password" : "${MYSQL_ROOT_PASSWORD}"
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "mysql",
+ "port": 3306
+ }
+ ],
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "PersistentVolumeClaim",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "mysql:${MYSQL_VERSION}",
+ "namespace": "${NAMESPACE}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "labels": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "mysql",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 3306
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 5,
+ "exec": {
+ "command": [ "/bin/sh", "-i", "-c",
+ "MYSQL_PWD=\"$MYSQL_PASSWORD\" mysql -h 127.0.0.1 -u $MYSQL_USER -D $MYSQL_DATABASE -e 'SELECT 1'"]
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 30,
+ "tcpSocket": {
+ "port": 3306
+ }
+ },
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-user"
+ }
+ }
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-password"
+ }
+ }
+ },
+ {
+ "name": "MYSQL_ROOT_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-root-password"
+ }
+ }
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${MYSQL_DATABASE}"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "mountPath": "/var/lib/mysql/data"
+ }
+ ],
+ "imagePullPolicy": "IfNotPresent"
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "persistentVolumeClaim": {
+ "claimName": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ],
+ "parameters": [
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "description": "Maximum amount of memory the container can use.",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Namespace",
+ "description": "The OpenShift Namespace where the ImageStream resides.",
+ "value": "openshift"
+ },
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "displayName": "Database Service Name",
+ "description": "The name of the OpenShift Service exposed for the database.",
+ "value": "mysql",
+ "required": true
+ },
+ {
+ "name": "MYSQL_USER",
+ "displayName": "MySQL Connection Username",
+ "description": "Username for MySQL user that will be used for accessing the database.",
+ "generate": "expression",
+ "from": "user[A-Z0-9]{3}",
+ "required": true
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "displayName": "MySQL Connection Password",
+ "description": "Password for the MySQL connection user.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}",
+ "required": true
+ },
+ {
+ "name": "MYSQL_ROOT_PASSWORD",
+ "displayName": "MySQL root user Password",
+ "description": "Password for the MySQL root user.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}",
+ "required": true
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "displayName": "MySQL Database Name",
+ "description": "Name of the MySQL database accessed.",
+ "value": "sampledb",
+ "required": true
+ },
+ {
+ "name": "VOLUME_CAPACITY",
+ "displayName": "Volume Capacity",
+ "description": "Volume space available for data, e.g. 512Mi, 2Gi.",
+ "value": "1Gi",
+ "required": true
+ },
+ {
+ "name": "MYSQL_VERSION",
+ "displayName": "Version of MySQL Image",
+ "description": "Version of MySQL image to be used (5.5, 5.6, 5.7, or latest).",
+ "value": "5.7",
+ "required": true
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/db-templates/postgresql-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.5/db-templates/postgresql-ephemeral-template.json
new file mode 100644
index 000000000..0d0a2a629
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/db-templates/postgresql-ephemeral-template.json
@@ -0,0 +1,235 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "postgresql-ephemeral",
+ "creationTimestamp": null,
+ "annotations": {
+ "openshift.io/display-name": "PostgreSQL (Ephemeral)",
+      "description": "PostgreSQL database service, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing.",
+ "iconClass": "icon-postgresql",
+ "tags": "database,postgresql"
+ }
+ },
+  "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${POSTGRESQL_USER}\n Password: ${POSTGRESQL_PASSWORD}\n Database Name: ${POSTGRESQL_DATABASE}\n Connection URL: postgresql://${DATABASE_SERVICE_NAME}:5432/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.",
+ "labels": {
+ "template": "postgresql-ephemeral-template"
+ },
+ "objects": [
+ {
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "stringData" : {
+ "database-user" : "${POSTGRESQL_USER}",
+ "database-password" : "${POSTGRESQL_PASSWORD}"
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "postgresql",
+ "protocol": "TCP",
+ "port": 5432,
+ "targetPort": 5432,
+ "nodePort": 0
+ }
+ ],
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "type": "ClusterIP",
+ "sessionAffinity": "None"
+ },
+ "status": {
+ "loadBalancer": {}
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "postgresql:${POSTGRESQL_VERSION}",
+ "namespace": "${NAMESPACE}"
+ },
+ "lastTriggeredImage": ""
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "postgresql",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 5,
+ "exec": {
+ "command": [ "/bin/sh", "-i", "-c", "psql -h 127.0.0.1 -U $POSTGRESQL_USER -q -d $POSTGRESQL_DATABASE -c 'SELECT 1'"]
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 30,
+ "tcpSocket": {
+ "port": 5432
+ }
+ },
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-user"
+ }
+ }
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-password"
+ }
+ }
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${POSTGRESQL_DATABASE}"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "mountPath": "/var/lib/pgsql/data"
+ }
+ ],
+ "terminationMessagePath": "/dev/termination-log",
+ "imagePullPolicy": "IfNotPresent",
+ "capabilities": {},
+ "securityContext": {
+ "capabilities": {},
+ "privileged": false
+ }
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "emptyDir": {
+ "medium": ""
+ }
+ }
+ ],
+ "restartPolicy": "Always",
+ "dnsPolicy": "ClusterFirst"
+ }
+ }
+ },
+ "status": {}
+ }
+ ],
+ "parameters": [
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "description": "Maximum amount of memory the container can use.",
+ "value": "512Mi"
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Namespace",
+ "description": "The OpenShift Namespace where the ImageStream resides.",
+ "value": "openshift"
+ },
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "displayName": "Database Service Name",
+ "description": "The name of the OpenShift Service exposed for the database.",
+ "value": "postgresql",
+ "required": true
+ },
+ {
+ "name": "POSTGRESQL_USER",
+ "displayName": "PostgreSQL Connection Username",
+ "description": "Username for PostgreSQL user that will be used for accessing the database.",
+ "generate": "expression",
+ "from": "user[A-Z0-9]{3}",
+ "required": true
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "displayName": "PostgreSQL Connection Password",
+ "description": "Password for the PostgreSQL connection user.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}",
+ "required": true
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "displayName": "PostgreSQL Database Name",
+ "description": "Name of the PostgreSQL database accessed.",
+ "value": "sampledb",
+ "required": true
+ },
+ {
+ "name": "POSTGRESQL_VERSION",
+ "displayName": "Version of PostgreSQL Image",
+ "description": "Version of PostgreSQL image to be used (9.2, 9.4, 9.5 or latest).",
+ "value": "9.5",
+ "required": true
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/db-templates/postgresql-persistent-template.json b/roles/openshift_examples/files/examples/v1.5/db-templates/postgresql-persistent-template.json
new file mode 100644
index 000000000..257726cfd
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/db-templates/postgresql-persistent-template.json
@@ -0,0 +1,259 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "postgresql-persistent",
+ "creationTimestamp": null,
+ "annotations": {
+ "openshift.io/display-name": "PostgreSQL (Persistent)",
+ "description": "PostgreSQL database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
+ "iconClass": "icon-postgresql",
+ "tags": "database,postgresql"
+ }
+ },
+  "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${POSTGRESQL_USER}\n Password: ${POSTGRESQL_PASSWORD}\n Database Name: ${POSTGRESQL_DATABASE}\n Connection URL: postgresql://${DATABASE_SERVICE_NAME}:5432/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.",
+ "labels": {
+ "template": "postgresql-persistent-template"
+ },
+ "objects": [
+ {
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "stringData" : {
+ "database-user" : "${POSTGRESQL_USER}",
+ "database-password" : "${POSTGRESQL_PASSWORD}"
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "postgresql",
+ "protocol": "TCP",
+ "port": 5432,
+ "targetPort": 5432,
+ "nodePort": 0
+ }
+ ],
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "type": "ClusterIP",
+ "sessionAffinity": "None"
+ },
+ "status": {
+ "loadBalancer": {}
+ }
+ },
+ {
+ "kind": "PersistentVolumeClaim",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "postgresql:${POSTGRESQL_VERSION}",
+ "namespace": "${NAMESPACE}"
+ },
+ "lastTriggeredImage": ""
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "postgresql",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 5,
+ "exec": {
+ "command": [ "/bin/sh", "-i", "-c", "psql -h 127.0.0.1 -U $POSTGRESQL_USER -q -d $POSTGRESQL_DATABASE -c 'SELECT 1'"]
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 30,
+ "tcpSocket": {
+ "port": 5432
+ }
+ },
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-user"
+ }
+ }
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${DATABASE_SERVICE_NAME}",
+ "key" : "database-password"
+ }
+ }
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${POSTGRESQL_DATABASE}"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "mountPath": "/var/lib/pgsql/data"
+ }
+ ],
+ "terminationMessagePath": "/dev/termination-log",
+ "imagePullPolicy": "IfNotPresent",
+ "capabilities": {},
+ "securityContext": {
+ "capabilities": {},
+ "privileged": false
+ }
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "persistentVolumeClaim": {
+ "claimName": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ ],
+ "restartPolicy": "Always",
+ "dnsPolicy": "ClusterFirst"
+ }
+ }
+ },
+ "status": {}
+ }
+ ],
+ "parameters": [
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "description": "Maximum amount of memory the container can use.",
+ "value": "512Mi"
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Namespace",
+ "description": "The OpenShift Namespace where the ImageStream resides.",
+ "value": "openshift"
+ },
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "displayName": "Database Service Name",
+ "description": "The name of the OpenShift Service exposed for the database.",
+ "value": "postgresql",
+ "required": true
+ },
+ {
+ "name": "POSTGRESQL_USER",
+ "displayName": "PostgreSQL Connection Username",
+ "description": "Username for PostgreSQL user that will be used for accessing the database.",
+ "generate": "expression",
+ "from": "user[A-Z0-9]{3}",
+ "required": true
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "displayName": "PostgreSQL Connection Password",
+ "description": "Password for the PostgreSQL connection user.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}",
+ "required": true
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "displayName": "PostgreSQL Database Name",
+ "description": "Name of the PostgreSQL database accessed.",
+ "value": "sampledb",
+ "required": true
+ },
+ {
+ "name": "VOLUME_CAPACITY",
+ "displayName": "Volume Capacity",
+ "description": "Volume space available for data, e.g. 512Mi, 2Gi.",
+ "value": "1Gi",
+ "required": true
+ },
+ {
+ "name": "POSTGRESQL_VERSION",
+ "displayName": "Version of PostgreSQL Image",
+ "description": "Version of PostgreSQL image to be used (9.2, 9.4, 9.5 or latest).",
+ "value": "9.5",
+ "required": true
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/db-templates/redis-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.5/db-templates/redis-ephemeral-template.json
new file mode 100644
index 000000000..c9ae8a539
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/db-templates/redis-ephemeral-template.json
@@ -0,0 +1,191 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "redis-ephemeral",
+ "creationTimestamp": null,
+ "annotations": {
+ "openshift.io/display-name": "Redis (Ephemeral)",
+      "description": "Redis in-memory data structure store, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/redis-container/blob/master/3.2.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing.",
+ "iconClass": "icon-redis",
+ "tags": "database,redis"
+ }
+ },
+ "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Password: ${REDIS_PASSWORD}\n Connection URL: redis://${DATABASE_SERVICE_NAME}:6379/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/redis-container/blob/master/3.2.",
+ "labels": {
+ "template": "redis-ephemeral-template"
+ },
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "redis",
+ "protocol": "TCP",
+ "port": 6379,
+ "targetPort": 6379,
+ "nodePort": 0
+ }
+ ],
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "type": "ClusterIP",
+ "sessionAffinity": "None"
+ },
+ "status": {
+ "loadBalancer": {}
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "redis"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "redis:${REDIS_VERSION}",
+ "namespace": "${NAMESPACE}"
+ },
+ "lastTriggeredImage": ""
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "redis",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 6379,
+ "protocol": "TCP"
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 5,
+ "exec": {
+ "command": [ "/bin/sh", "-i", "-c", "test \"$(redis-cli -h 127.0.0.1 -a $REDIS_PASSWORD ping)\" == \"PONG\""]
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 30,
+ "tcpSocket": {
+ "port": 6379
+ }
+ },
+ "env": [
+ {
+ "name": "REDIS_PASSWORD",
+ "value": "${REDIS_PASSWORD}"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "mountPath": "/var/lib/redis/data"
+ }
+ ],
+ "terminationMessagePath": "/dev/termination-log",
+ "imagePullPolicy": "IfNotPresent",
+ "capabilities": {},
+ "securityContext": {
+ "capabilities": {},
+ "privileged": false
+ }
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "emptyDir": {
+ "medium": ""
+ }
+ }
+ ],
+ "restartPolicy": "Always",
+ "dnsPolicy": "ClusterFirst"
+ }
+ }
+ },
+ "status": {}
+ }
+ ],
+ "parameters": [
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "description": "Maximum amount of memory the container can use.",
+ "value": "512Mi"
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Namespace",
+ "description": "The OpenShift Namespace where the ImageStream resides.",
+ "value": "openshift"
+ },
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "displayName": "Database Service Name",
+ "description": "The name of the OpenShift Service exposed for the database.",
+ "value": "redis",
+ "required": true
+ },
+ {
+ "name": "REDIS_PASSWORD",
+ "displayName": "Redis Connection Password",
+ "description": "Password for the Redis connection user.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}",
+ "required": true
+ },
+ {
+ "name": "REDIS_VERSION",
+ "displayName": "Version of Redis Image",
+ "description": "Version of Redis image to be used (3.2 or latest).",
+ "value": "3.2",
+ "required": true
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/db-templates/redis-persistent-template.json b/roles/openshift_examples/files/examples/v1.5/db-templates/redis-persistent-template.json
new file mode 100644
index 000000000..e9db9ec9d
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/db-templates/redis-persistent-template.json
@@ -0,0 +1,215 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "redis-persistent",
+ "creationTimestamp": null,
+ "annotations": {
+ "openshift.io/display-name": "Redis (Persistent)",
+ "description": "Redis in-memory data structure store, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/redis-container/blob/master/3.2.\n\nNOTE: You must have persistent volumes available in your cluster to use this template.",
+ "iconClass": "icon-redis",
+ "tags": "database,redis"
+ }
+ },
+ "message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Password: ${REDIS_PASSWORD}\n Connection URL: redis://${DATABASE_SERVICE_NAME}:6379/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/redis-container/blob/master/3.2.",
+ "labels": {
+ "template": "redis-persistent-template"
+ },
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "redis",
+ "protocol": "TCP",
+ "port": 6379,
+ "targetPort": 6379,
+ "nodePort": 0
+ }
+ ],
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "type": "ClusterIP",
+ "sessionAffinity": "None"
+ },
+ "status": {
+ "loadBalancer": {}
+ }
+ },
+ {
+ "kind": "PersistentVolumeClaim",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "redis"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "redis:${REDIS_VERSION}",
+ "namespace": "${NAMESPACE}"
+ },
+ "lastTriggeredImage": ""
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "redis",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 6379,
+ "protocol": "TCP"
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 5,
+ "exec": {
+ "command": [ "/bin/sh", "-i", "-c", "test \"$(redis-cli -h 127.0.0.1 -a $REDIS_PASSWORD ping)\" == \"PONG\""]
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 30,
+ "tcpSocket": {
+ "port": 6379
+ }
+ },
+ "env": [
+ {
+ "name": "REDIS_PASSWORD",
+ "value": "${REDIS_PASSWORD}"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "mountPath": "/var/lib/redis/data"
+ }
+ ],
+ "terminationMessagePath": "/dev/termination-log",
+ "imagePullPolicy": "IfNotPresent",
+ "capabilities": {},
+ "securityContext": {
+ "capabilities": {},
+ "privileged": false
+ }
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "persistentVolumeClaim": {
+ "claimName": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ ],
+ "restartPolicy": "Always",
+ "dnsPolicy": "ClusterFirst"
+ }
+ }
+ },
+ "status": {}
+ }
+ ],
+ "parameters": [
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "description": "Maximum amount of memory the container can use.",
+ "value": "512Mi"
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Namespace",
+ "description": "The OpenShift Namespace where the ImageStream resides.",
+ "value": "openshift"
+ },
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "displayName": "Database Service Name",
+ "description": "The name of the OpenShift Service exposed for the database.",
+ "value": "redis",
+ "required": true
+ },
+ {
+ "name": "REDIS_PASSWORD",
+ "displayName": "Redis Connection Password",
+ "description": "Password for the Redis connection user.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}",
+ "required": true
+ },
+ {
+ "name": "VOLUME_CAPACITY",
+ "displayName": "Volume Capacity",
+ "description": "Volume space available for data, e.g. 512Mi, 2Gi.",
+ "value": "1Gi",
+ "required": true
+ },
+ {
+ "name": "REDIS_VERSION",
+ "displayName": "Version of Redis Image",
+ "description": "Version of Redis image to be used (3.2 or latest).",
+ "value": "3.2",
+ "required": true
+ }
+ ]
+}
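
[Editor's note, not part of the diff] The persistent Redis template differs from the ephemeral one only in its storage: it adds a PersistentVolumeClaim sized by VOLUME_CAPACITY and mounts it through persistentVolumeClaim instead of emptyDir. A short sketch that compares the two files makes that relationship explicit; the local file names are assumptions.

import json

# Hypothetical local copies of the two Redis templates shown above.
EPHEMERAL = "redis-ephemeral-template.json"
PERSISTENT = "redis-persistent-template.json"

def summarize(path: str):
    """Return the object kinds and parameter names declared by a template."""
    with open(path) as f:
        tmpl = json.load(f)
    kinds = [obj["kind"] for obj in tmpl["objects"]]
    params = {p["name"] for p in tmpl["parameters"]}
    return kinds, params

eph_kinds, eph_params = summarize(EPHEMERAL)
per_kinds, per_params = summarize(PERSISTENT)

print("objects only in persistent:", sorted(set(per_kinds) - set(eph_kinds)))
# -> ['PersistentVolumeClaim']
print("parameters only in persistent:", sorted(per_params - eph_params))
# -> ['VOLUME_CAPACITY']
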
diff --git a/roles/openshift_examples/files/examples/v1.5/image-streams/dotnet_imagestreams.json b/roles/openshift_examples/files/examples/v1.5/image-streams/dotnet_imagestreams.json
new file mode 100644
index 000000000..0d5ac21d8
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/image-streams/dotnet_imagestreams.json
@@ -0,0 +1,76 @@
+{
+ "kind": "ImageStreamList",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "dotnet-image-streams",
+ "annotations": {
+ "description": "ImageStream definitions for .NET Core on RHEL"
+ }
+ },
+ "items": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "dotnet",
+ "annotations": {
+ "openshift.io/display-name": ".NET Core"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": ".NET Core (Latest)",
+ "description": "Build and run .NET Core applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/redhat-developer/s2i-dotnetcore/tree/master/1.1/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of .NET Core available on OpenShift, including major versions updates.",
+ "iconClass": "icon-dotnet",
+ "tags": "builder,.net,dotnet,dotnetcore",
+ "supports":"dotnet",
+ "sampleRepo": "https://github.com/redhat-developer/s2i-dotnetcore.git",
+ "sampleContextDir": "1.1/test/asp-net-hello-world"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "1.1"
+ }
+ },
+ {
+ "name": "1.1",
+ "annotations": {
+ "openshift.io/display-name": ".NET Core 1.1",
+ "description": "Build and run .NET Core 1.1 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/redhat-developer/s2i-dotnetcore/tree/master/1.1/README.md.",
+ "iconClass": "icon-dotnet",
+ "tags": "builder,.net,dotnet,dotnetcore,rh-dotnetcore11",
+ "supports":"dotnet:1.1,dotnet",
+ "sampleRepo": "https://github.com/redhat-developer/s2i-dotnetcore.git",
+ "sampleContextDir": "1.1/test/asp-net-hello-world",
+ "version": "1.1"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/dotnet/dotnetcore-11-rhel7:1.1"
+ }
+ },
+ {
+ "name": "1.0",
+ "annotations": {
+ "openshift.io/display-name": ".NET Core 1.0",
+ "description": "Build and run .NET Core 1.0 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/redhat-developer/s2i-dotnetcore/tree/master/1.0/README.md.",
+ "iconClass": "icon-dotnet",
+ "tags": "builder,.net,dotnet,dotnetcore,rh-dotnetcore10",
+ "supports":"dotnet:1.0,dotnet",
+ "sampleRepo": "https://github.com/redhat-developer/s2i-dotnetcore.git",
+ "sampleContextDir": "1.0/test/asp-net-hello-world",
+ "version": "1.0"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/dotnet/dotnetcore-10-rhel7:1.0"
+ }
+ }
+ ]
+ }
+ }
+ ]
+}
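
[Editor's note, not part of the diff] In these ImageStream definitions the "latest" tag is an alias (from.kind ImageStreamTag) onto a concrete versioned tag, while the versioned tags point at external DockerImage references. The sketch below walks an ImageStreamList and reports where each "latest" alias resolves; the file name is an assumption.

import json

# Hypothetical local copy of the .NET Core ImageStreamList shown above.
STREAMS_PATH = "dotnet_imagestreams.json"

with open(STREAMS_PATH) as f:
    stream_list = json.load(f)

for stream in stream_list["items"]:
    name = stream["metadata"]["name"]
    tags = {t["name"]: t for t in stream["spec"]["tags"]}
    latest = tags.get("latest")
    if latest and latest["from"]["kind"] == "ImageStreamTag":
        target = tags.get(latest["from"]["name"], {})
        image = target.get("from", {}).get("name", "<unknown>")
        print(f"{name}:latest -> {name}:{latest['from']['name']} ({image})")
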
diff --git a/roles/openshift_examples/files/examples/v1.5/image-streams/image-streams-centos7.json b/roles/openshift_examples/files/examples/v1.5/image-streams/image-streams-centos7.json
new file mode 100644
index 000000000..1a90a9409
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/image-streams/image-streams-centos7.json
@@ -0,0 +1,829 @@
+{
+ "kind": "ImageStreamList",
+ "apiVersion": "v1",
+ "metadata": {},
+ "items": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "ruby",
+ "annotations": {
+ "openshift.io/display-name": "Ruby"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": "Ruby (Latest)",
+ "description": "Build and run Ruby applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-ruby-container/tree/master/2.3/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Ruby available on OpenShift, including major versions updates.",
+ "iconClass": "icon-ruby",
+ "tags": "builder,ruby",
+ "supports": "ruby",
+ "sampleRepo": "https://github.com/openshift/ruby-ex.git"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "2.3"
+ }
+ },
+ {
+ "name": "2.0",
+ "annotations": {
+ "openshift.io/display-name": "Ruby 2.0",
+ "description": "Build and run Ruby 2.0 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-ruby-container/tree/master/2.0/README.md.",
+ "iconClass": "icon-ruby",
+ "tags": "hidden,builder,ruby",
+ "supports": "ruby:2.0,ruby",
+ "version": "2.0",
+ "sampleRepo": "https://github.com/openshift/ruby-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "openshift/ruby-20-centos7:latest"
+ }
+ },
+ {
+ "name": "2.2",
+ "annotations": {
+ "openshift.io/display-name": "Ruby 2.2",
+ "description": "Build and run Ruby 2.2 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-ruby-container/tree/master/2.2/README.md.",
+ "iconClass": "icon-ruby",
+ "tags": "builder,ruby",
+ "supports": "ruby:2.2,ruby",
+ "version": "2.2",
+ "sampleRepo": "https://github.com/openshift/ruby-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/ruby-22-centos7:latest"
+ }
+ },
+ {
+ "name": "2.3",
+ "annotations": {
+ "openshift.io/display-name": "Ruby 2.3",
+ "description": "Build and run Ruby 2.3 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-ruby-container/blob/master/2.3/README.md.",
+ "iconClass": "icon-ruby",
+ "tags": "builder,ruby",
+ "supports": "ruby:2.3,ruby",
+ "version": "2.3",
+ "sampleRepo": "https://github.com/openshift/ruby-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/ruby-23-centos7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "nodejs",
+ "annotations": {
+ "openshift.io/display-name": "Node.js"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": "Node.js (Latest)",
+ "description": "Build and run Node.js applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/4/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Node.js available on OpenShift, including major versions updates.",
+ "iconClass": "icon-nodejs",
+ "tags": "builder,nodejs",
+ "supports":"nodejs",
+ "sampleRepo": "https://github.com/openshift/nodejs-ex.git"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "4"
+ }
+ },
+ {
+ "name": "0.10",
+ "annotations": {
+ "openshift.io/display-name": "Node.js 0.10",
+ "description": "DEPRECATED: Build and run Node.js 0.10 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/0.10/README.md.",
+ "iconClass": "icon-nodejs",
+ "tags": "hidden,nodejs",
+ "supports":"nodejs:0.10,nodejs:0.1,nodejs",
+ "version": "0.10",
+ "sampleRepo": "https://github.com/openshift/nodejs-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "openshift/nodejs-010-centos7:latest"
+ }
+ },
+ {
+ "name": "4",
+ "annotations": {
+ "openshift.io/display-name": "Node.js 4",
+ "description": "Build and run Node.js 4 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/4/README.md.",
+ "iconClass": "icon-nodejs",
+ "tags": "builder,nodejs",
+ "supports":"nodejs:4,nodejs",
+ "version": "4",
+ "sampleRepo": "https://github.com/openshift/nodejs-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/nodejs-4-centos7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "perl",
+ "annotations": {
+ "openshift.io/display-name": "Perl"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": "Perl (Latest)",
+ "description": "Build and run Perl applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-perl-container/blob/master/5.20/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Perl available on OpenShift, including major versions updates.",
+ "iconClass": "icon-perl",
+ "tags": "builder,perl",
+ "supports":"perl",
+ "sampleRepo": "https://github.com/openshift/dancer-ex.git"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "5.24"
+ }
+ },
+ {
+ "name": "5.16",
+ "annotations": {
+ "openshift.io/display-name": "Perl 5.16",
+ "description": "Build and run Perl 5.16 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-perl-container/blob/master/5.16/README.md.",
+ "iconClass": "icon-perl",
+ "tags": "hidden,builder,perl",
+ "supports":"perl:5.16,perl",
+ "version": "5.16",
+ "sampleRepo": "https://github.com/openshift/dancer-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "openshift/perl-516-centos7:latest"
+ }
+ },
+ {
+ "name": "5.20",
+ "annotations": {
+ "openshift.io/display-name": "Perl 5.20",
+ "description": "Build and run Perl 5.20 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-perl-container/blob/master/5.20/README.md.",
+ "iconClass": "icon-perl",
+ "tags": "builder,perl",
+ "supports":"perl:5.20,perl",
+ "version": "5.20",
+ "sampleRepo": "https://github.com/openshift/dancer-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/perl-520-centos7:latest"
+ }
+ },
+ {
+ "name": "5.24",
+ "annotations": {
+ "openshift.io/display-name": "Perl 5.24",
+ "description": "Build and run Perl 5.24 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-perl-container/blob/master/5.24/README.md.",
+ "iconClass": "icon-perl",
+ "tags": "builder,perl",
+ "supports":"perl:5.24,perl",
+ "version": "5.24",
+ "sampleRepo": "https://github.com/openshift/dancer-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/perl-524-centos7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "php",
+ "annotations": {
+ "openshift.io/display-name": "PHP"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": "PHP (Latest)",
+ "description": "Build and run PHP applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/5.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of PHP available on OpenShift, including major versions updates.",
+ "iconClass": "icon-php",
+ "tags": "builder,php",
+ "supports":"php",
+ "sampleRepo": "https://github.com/openshift/cakephp-ex.git"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "7.0"
+ }
+ },
+ {
+ "name": "5.5",
+ "annotations": {
+ "openshift.io/display-name": "PHP 5.5",
+ "description": "Build and run PHP 5.5 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/5.5/README.md.",
+ "iconClass": "icon-php",
+ "tags": "hidden,builder,php",
+ "supports":"php:5.5,php",
+ "version": "5.5",
+ "sampleRepo": "https://github.com/openshift/cakephp-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "openshift/php-55-centos7:latest"
+ }
+ },
+ {
+ "name": "5.6",
+ "annotations": {
+ "openshift.io/display-name": "PHP 5.6",
+ "description": "Build and run PHP 5.6 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/5.6/README.md.",
+ "iconClass": "icon-php",
+ "tags": "builder,php",
+ "supports":"php:5.6,php",
+ "version": "5.6",
+ "sampleRepo": "https://github.com/openshift/cakephp-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/php-56-centos7:latest"
+ }
+ },
+ {
+ "name": "7.0",
+ "annotations": {
+ "openshift.io/display-name": "PHP 7.0",
+ "description": "Build and run PHP 7.0 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/7.0/README.md.",
+ "iconClass": "icon-php",
+ "tags": "builder,php",
+ "supports":"php:7.0,php",
+ "version": "7.0",
+ "sampleRepo": "https://github.com/openshift/cakephp-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/php-70-centos7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "python",
+ "annotations": {
+ "openshift.io/display-name": "Python"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": "Python (Latest)",
+ "description": "Build and run Python applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.5/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Python available on OpenShift, including major versions updates.",
+ "iconClass": "icon-python",
+ "tags": "builder,python",
+ "supports":"python",
+ "sampleRepo": "https://github.com/openshift/django-ex.git"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "3.5"
+ }
+ },
+ {
+ "name": "3.3",
+ "annotations": {
+ "openshift.io/display-name": "Python 3.3",
+ "description": "Build and run Python 3.3 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.3/README.md.",
+ "iconClass": "icon-python",
+ "tags": "hidden,builder,python",
+ "supports":"python:3.3,python",
+ "version": "3.3",
+ "sampleRepo": "https://github.com/openshift/django-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "openshift/python-33-centos7:latest"
+ }
+ },
+ {
+ "name": "2.7",
+ "annotations": {
+ "openshift.io/display-name": "Python 2.7",
+ "description": "Build and run Python 2.7 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/2.7/README.md.",
+ "iconClass": "icon-python",
+ "tags": "builder,python",
+ "supports":"python:2.7,python",
+ "version": "2.7",
+ "sampleRepo": "https://github.com/openshift/django-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/python-27-centos7:latest"
+ }
+ },
+ {
+ "name": "3.4",
+ "annotations": {
+ "openshift.io/display-name": "Python 3.4",
+ "description": "Build and run Python 3.4 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.4/README.md.",
+ "iconClass": "icon-python",
+ "tags": "builder,python",
+ "supports":"python:3.4,python",
+ "version": "3.4",
+ "sampleRepo": "https://github.com/openshift/django-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/python-34-centos7:latest"
+ }
+ },
+ {
+ "name": "3.5",
+ "annotations": {
+ "openshift.io/display-name": "Python 3.5",
+ "description": "Build and run Python 3.5 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.5/README.md.",
+ "iconClass": "icon-python",
+ "tags": "builder,python",
+ "supports":"python:3.5,python",
+ "version": "3.5",
+ "sampleRepo": "https://github.com/openshift/django-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/python-35-centos7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "wildfly",
+ "annotations": {
+ "openshift.io/display-name": "WildFly"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": "WildFly (Latest)",
+ "description": "Build and run WildFly applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/openshift-s2i/s2i-wildfly/blob/master/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of WildFly available on OpenShift, including major versions updates.",
+ "iconClass": "icon-wildfly",
+ "tags": "builder,wildfly,java",
+ "supports":"jee,java",
+ "sampleRepo": "https://github.com/bparees/openshift-jee-sample.git"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "10.1"
+ }
+ },
+ {
+ "name": "8.1",
+ "annotations": {
+ "openshift.io/display-name": "WildFly 8.1",
+ "description": "Build and run WildFly 8.1 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/openshift-s2i/s2i-wildfly/blob/master/README.md.",
+ "iconClass": "icon-wildfly",
+ "tags": "builder,wildfly,java",
+ "supports":"wildfly:8.1,jee,java",
+ "version": "8.1",
+ "sampleRepo": "https://github.com/bparees/openshift-jee-sample.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "openshift/wildfly-81-centos7:latest"
+ }
+ },
+ {
+ "name": "9.0",
+ "annotations": {
+ "openshift.io/display-name": "WildFly 9.0",
+ "description": "Build and run WildFly 9.0 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/openshift-s2i/s2i-wildfly/blob/master/README.md.",
+ "iconClass": "icon-wildfly",
+ "tags": "builder,wildfly,java",
+ "supports":"wildfly:9.0,jee,java",
+ "version": "9.0",
+ "sampleRepo": "https://github.com/bparees/openshift-jee-sample.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "openshift/wildfly-90-centos7:latest"
+ }
+ },
+ {
+ "name": "10.0",
+ "annotations": {
+ "openshift.io/display-name": "WildFly 10.0",
+ "description": "Build and run WildFly 10.0 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/openshift-s2i/s2i-wildfly/blob/master/README.md.",
+ "iconClass": "icon-wildfly",
+ "tags": "builder,wildfly,java",
+ "supports":"wildfly:10.0,jee,java",
+ "version": "10.0",
+ "sampleRepo": "https://github.com/bparees/openshift-jee-sample.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "openshift/wildfly-100-centos7:latest"
+ }
+ },
+ {
+ "name": "10.1",
+ "annotations": {
+ "openshift.io/display-name": "WildFly 10.1",
+ "description": "Build and run WildFly 10.1 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/openshift-s2i/s2i-wildfly/blob/master/README.md.",
+ "iconClass": "icon-wildfly",
+ "tags": "builder,wildfly,java",
+ "supports":"wildfly:10.1,jee,java",
+ "version": "10.1",
+ "sampleRepo": "https://github.com/bparees/openshift-jee-sample.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "openshift/wildfly-101-centos7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "mysql",
+ "annotations": {
+ "openshift.io/display-name": "MySQL"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": "MySQL (Latest)",
+ "description": "Provides a MySQL database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mysql-container/tree/master/5.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of MySQL available on OpenShift, including major versions updates.",
+ "iconClass": "icon-mysql-database",
+ "tags": "mysql"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "5.7"
+ }
+ },
+ {
+ "name": "5.5",
+ "annotations": {
+ "openshift.io/display-name": "MySQL 5.5",
+ "description": "Provides a MySQL 5.5 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mysql-container/tree/master/5.5/README.md.",
+ "iconClass": "icon-mysql-database",
+ "tags": "hidden,mysql",
+ "version": "5.5"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "openshift/mysql-55-centos7:latest"
+ }
+ },
+ {
+ "name": "5.6",
+ "annotations": {
+ "openshift.io/display-name": "MySQL 5.6",
+ "description": "Provides a MySQL 5.6 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mysql-container/tree/master/5.6/README.md.",
+ "iconClass": "icon-mysql-database",
+ "tags": "mysql",
+ "version": "5.6"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/mysql-56-centos7:latest"
+ }
+ },
+ {
+ "name": "5.7",
+ "annotations": {
+ "openshift.io/display-name": "MySQL 5.7",
+ "description": "Provides a MySQL 5.7 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mysql-container/tree/master/5.7/README.md.",
+ "iconClass": "icon-mysql-database",
+ "tags": "mysql",
+ "version": "5.7"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/mysql-57-centos7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "mariadb",
+ "annotations": {
+ "openshift.io/display-name": "MariaDB"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": "MariaDB (Latest)",
+ "description": "Provides a MariaDB database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/tree/master/10.1/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of MariaDB available on OpenShift, including major versions updates.",
+ "iconClass": "icon-mariadb",
+ "tags": "mariadb"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "10.1"
+ }
+ },
+ {
+ "name": "10.1",
+ "annotations": {
+ "openshift.io/display-name": "MariaDB 10.1",
+ "description": "Provides a MariaDB 10.1 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/tree/master/10.1/README.md.",
+ "iconClass": "icon-mariadb",
+ "tags": "mariadb",
+ "version": "10.1"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/mariadb-101-centos7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "postgresql",
+ "annotations": {
+ "openshift.io/display-name": "PostgreSQL"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": "PostgreSQL (Latest)",
+ "description": "Provides a PostgreSQL database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.5.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of PostgreSQL available on OpenShift, including major versions updates.",
+ "iconClass": "icon-postgresql",
+ "tags": "postgresql"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "9.5"
+ }
+ },
+ {
+ "name": "9.2",
+ "annotations": {
+ "openshift.io/display-name": "PostgreSQL 9.2",
+ "description": "Provides a PostgreSQL 9.2 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.2.",
+ "iconClass": "icon-postgresql",
+ "tags": "hidden,postgresql",
+ "version": "9.2"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "openshift/postgresql-92-centos7:latest"
+ }
+ },
+ {
+ "name": "9.4",
+ "annotations": {
+ "openshift.io/display-name": "PostgreSQL 9.4",
+ "description": "Provides a PostgreSQL 9.4 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.4.",
+ "iconClass": "icon-postgresql",
+ "tags": "postgresql",
+ "version": "9.4"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/postgresql-94-centos7:latest"
+ }
+ },
+ {
+ "name": "9.5",
+ "annotations": {
+ "openshift.io/display-name": "PostgreSQL 9.5",
+ "description": "Provides a PostgreSQL 9.5 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.5.",
+ "iconClass": "icon-postgresql",
+ "tags": "postgresql",
+ "version": "9.5"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/postgresql-95-centos7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "mongodb",
+ "annotations": {
+ "openshift.io/display-name": "MongoDB"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": "MongoDB (Latest)",
+ "description": "Provides a MongoDB database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/3.2/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of MongoDB available on OpenShift, including major versions updates.",
+ "iconClass": "icon-mongodb",
+ "tags": "mongodb"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "3.2"
+ }
+ },
+ {
+ "name": "2.4",
+ "annotations": {
+ "openshift.io/display-name": "MongoDB 2.4",
+ "description": "Provides a MongoDB 2.4 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/2.4/README.md.",
+ "iconClass": "icon-mongodb",
+ "tags": "hidden,mongodb",
+ "version": "2.4"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "openshift/mongodb-24-centos7:latest"
+ }
+ },
+ {
+ "name": "2.6",
+ "annotations": {
+ "openshift.io/display-name": "MongoDB 2.6",
+ "description": "Provides a MongoDB 2.6 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/2.6/README.md.",
+ "iconClass": "icon-mongodb",
+ "tags": "mongodb",
+ "version": "2.6"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/mongodb-26-centos7:latest"
+ }
+ },
+ {
+ "name": "3.2",
+ "annotations": {
+ "openshift.io/display-name": "MongoDB 3.2",
+ "description": "Provides a MongoDB 3.2 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/3.2/README.md.",
+ "iconClass": "icon-mongodb",
+ "tags": "mongodb",
+ "version": "3.2"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/mongodb-32-centos7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "redis",
+ "annotations": {
+ "openshift.io/display-name": "Redis"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": "Redis (Latest)",
+ "description": "Provides a Redis database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/redis-container/tree/master/3.2/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Redis available on OpenShift, including major versions updates.",
+ "iconClass": "icon-redis",
+ "tags": "redis"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "3.2"
+ }
+ },
+ {
+ "name": "3.2",
+ "annotations": {
+ "openshift.io/display-name": "Redis 3.2",
+ "description": "Provides a Redis 3.2 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/redis-container/tree/master/3.2/README.md.",
+ "iconClass": "icon-redis",
+ "tags": "redis",
+ "version": "3.2"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/redis-32-centos7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jenkins",
+ "annotations": {
+ "openshift.io/display-name": "Jenkins"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": "Jenkins (Latest)",
+ "description": "Provides a Jenkins server on CentOS 7. For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Jenkins available on OpenShift, including major versions updates.",
+ "iconClass": "icon-jenkins",
+ "tags": "jenkins"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "2"
+ }
+ },
+ {
+ "name": "1",
+ "annotations": {
+ "openshift.io/display-name": "Jenkins 1.X",
+ "description": "Provides a Jenkins 1.X server on CentOS 7. For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.",
+ "iconClass": "icon-jenkins",
+ "tags": "jenkins",
+ "version": "1.x"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "openshift/jenkins-1-centos7:latest"
+ }
+ },
+ {
+ "name": "2",
+ "annotations": {
+ "openshift.io/display-name": "Jenkins 2.X",
+ "description": "Provides a Jenkins v2.x server on CentOS 7. For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.",
+ "iconClass": "icon-jenkins",
+ "tags": "jenkins",
+ "version": "2.x"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "openshift/jenkins-2-centos7:latest"
+ }
+ }
+ ]
+ }
+ }
+ ]
+}
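
[Editor's note, not part of the diff] The CentOS 7 stream list above repeats the same pattern at larger scale: each builder or database stream exposes a floating "latest" alias plus versioned tags, and tags whose annotations include "hidden" (ruby:2.0, nodejs:0.10, php:5.5, python:3.3, perl:5.16, mysql:5.5, postgresql:9.2, mongodb:2.4) are kept for existing deployments but left out of the catalog listing. A brief sketch, with an assumed local file name, lists which versioned tags are hidden versus advertised.

import json

# Hypothetical local copy of the CentOS 7 ImageStreamList shown above.
STREAMS_PATH = "image-streams-centos7.json"

with open(STREAMS_PATH) as f:
    stream_list = json.load(f)

for stream in stream_list["items"]:
    name = stream["metadata"]["name"]
    for tag in stream["spec"]["tags"]:
        if tag["name"] == "latest":
            continue  # the alias itself, handled separately
        categories = tag.get("annotations", {}).get("tags", "")
        marker = "hidden" if "hidden" in categories.split(",") else "advertised"
        print(f"{name}:{tag['name']:<6} {marker}")
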
diff --git a/roles/openshift_examples/files/examples/v1.5/image-streams/image-streams-rhel7.json b/roles/openshift_examples/files/examples/v1.5/image-streams/image-streams-rhel7.json
new file mode 100644
index 000000000..9b19b8bd0
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/image-streams/image-streams-rhel7.json
@@ -0,0 +1,736 @@
+{
+ "kind": "ImageStreamList",
+ "apiVersion": "v1",
+ "metadata": {},
+ "items": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "ruby",
+ "annotations": {
+ "openshift.io/display-name": "Ruby"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": "Ruby (Latest)",
+ "description": "Build and run Ruby applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-ruby-container/tree/master/2.3/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Ruby available on OpenShift, including major versions updates.",
+ "iconClass": "icon-ruby",
+ "tags": "builder,ruby",
+ "supports": "ruby",
+ "sampleRepo": "https://github.com/openshift/ruby-ex.git"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "2.3"
+ }
+ },
+ {
+ "name": "2.0",
+ "annotations": {
+ "openshift.io/display-name": "Ruby 2.0",
+ "description": "Build and run Ruby 2.0 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-ruby-container/tree/master/2.0/README.md.",
+ "iconClass": "icon-ruby",
+ "tags": "hidden,builder,ruby",
+ "supports": "ruby:2.0,ruby",
+ "version": "2.0",
+ "sampleRepo": "https://github.com/openshift/ruby-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/openshift3/ruby-20-rhel7:latest"
+ }
+ },
+ {
+ "name": "2.2",
+ "annotations": {
+ "openshift.io/display-name": "Ruby 2.2",
+ "description": "Build and run Ruby 2.2 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-ruby-container/tree/master/2.2/README.md.",
+ "iconClass": "icon-ruby",
+ "tags": "builder,ruby",
+ "supports": "ruby:2.2,ruby",
+ "version": "2.2",
+ "sampleRepo": "https://github.com/openshift/ruby-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/ruby-22-rhel7:latest"
+ }
+ },
+ {
+ "name": "2.3",
+ "annotations": {
+ "openshift.io/display-name": "Ruby 2.3",
+ "description": "Build and run Ruby 2.3 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-ruby-container/blob/master/2.3/README.md.",
+ "iconClass": "icon-ruby",
+ "tags": "builder,ruby",
+ "supports": "ruby:2.3,ruby",
+ "version": "2.3",
+ "sampleRepo": "https://github.com/openshift/ruby-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/ruby-23-rhel7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "nodejs",
+ "annotations": {
+ "openshift.io/display-name": "Node.js"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": "Node.js (Latest)",
+ "description": "Build and run Node.js applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/4/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Node.js available on OpenShift, including major versions updates.",
+ "iconClass": "icon-nodejs",
+ "tags": "builder,nodejs",
+ "supports":"nodejs",
+ "sampleRepo": "https://github.com/openshift/nodejs-ex.git"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "4"
+ }
+ },
+ {
+ "name": "0.10",
+ "annotations": {
+ "openshift.io/display-name": "Node.js 0.10",
+ "description": "DEPRECATED: Build and run Node.js 0.10 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/0.10/README.md.",
+ "iconClass": "icon-nodejs",
+ "tags": "hidden,nodejs",
+ "supports":"nodejs:0.10,nodejs:0.1,nodejs",
+ "version": "0.10",
+ "sampleRepo": "https://github.com/openshift/nodejs-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/openshift3/nodejs-010-rhel7:latest"
+ }
+ },
+ {
+ "name": "4",
+ "annotations": {
+ "openshift.io/display-name": "Node.js 4",
+ "description": "Build and run Node.js 4 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/4/README.md.",
+ "iconClass": "icon-nodejs",
+ "tags": "builder,nodejs",
+ "supports":"nodejs:4,nodejs",
+ "version": "4",
+ "sampleRepo": "https://github.com/openshift/nodejs-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/nodejs-4-rhel7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "perl",
+ "annotations": {
+ "openshift.io/display-name": "Perl"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": "Perl (Latest)",
+ "description": "Build and run Perl applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-perl-container/blob/master/5.20/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Perl available on OpenShift, including major versions updates.",
+ "iconClass": "icon-perl",
+ "tags": "builder,perl",
+ "supports":"perl",
+ "sampleRepo": "https://github.com/openshift/dancer-ex.git"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "5.24"
+ }
+ },
+ {
+ "name": "5.16",
+ "annotations": {
+ "openshift.io/display-name": "Perl 5.16",
+ "description": "Build and run Perl 5.16 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-perl-container/blob/master/5.16/README.md.",
+ "iconClass": "icon-perl",
+ "tags": "hidden,builder,perl",
+ "supports":"perl:5.16,perl",
+ "version": "5.16",
+ "sampleRepo": "https://github.com/openshift/dancer-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/openshift3/perl-516-rhel7:latest"
+ }
+ },
+ {
+ "name": "5.20",
+ "annotations": {
+ "openshift.io/display-name": "Perl 5.20",
+ "description": "Build and run Perl 5.20 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-perl-container/blob/master/5.20/README.md.",
+ "iconClass": "icon-perl",
+ "tags": "builder,perl",
+ "supports":"perl:5.20,perl",
+ "version": "5.20",
+ "sampleRepo": "https://github.com/openshift/dancer-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/perl-520-rhel7:latest"
+ }
+ },
+ {
+ "name": "5.24",
+ "annotations": {
+ "openshift.io/display-name": "Perl 5.24",
+ "description": "Build and run Perl 5.24 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-perl-container/blob/master/5.24/README.md.",
+ "iconClass": "icon-perl",
+ "tags": "builder,perl",
+ "supports":"perl:5.24,perl",
+ "version": "5.24",
+ "sampleRepo": "https://github.com/openshift/dancer-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/perl-524-rhel7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "php",
+ "annotations": {
+ "openshift.io/display-name": "PHP"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": "PHP (Latest)",
+ "description": "Build and run PHP applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/5.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of PHP available on OpenShift, including major versions updates.",
+ "iconClass": "icon-php",
+ "tags": "builder,php",
+ "supports":"php",
+ "sampleRepo": "https://github.com/openshift/cakephp-ex.git"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "5.6"
+ }
+ },
+ {
+ "name": "5.5",
+ "annotations": {
+ "openshift.io/display-name": "PHP 5.5",
+ "description": "Build and run PHP 5.5 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/5.5/README.md.",
+ "iconClass": "icon-php",
+ "tags": "hidden,builder,php",
+ "supports":"php:5.5,php",
+ "version": "5.5",
+ "sampleRepo": "https://github.com/openshift/cakephp-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/openshift3/php-55-rhel7:latest"
+ }
+ },
+ {
+ "name": "5.6",
+ "annotations": {
+ "openshift.io/display-name": "PHP 5.6",
+ "description": "Build and run PHP 5.6 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/5.6/README.md.",
+ "iconClass": "icon-php",
+ "tags": "builder,php",
+ "supports":"php:5.6,php",
+ "version": "5.6",
+ "sampleRepo": "https://github.com/openshift/cakephp-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/php-56-rhel7:latest"
+ }
+ },
+ {
+ "name": "7.0",
+ "annotations": {
+ "openshift.io/display-name": "PHP 7.0",
+ "description": "Build and run PHP 7.0 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/7.0/README.md.",
+ "iconClass": "icon-php",
+ "tags": "builder,php",
+ "supports":"php:7.0,php",
+ "version": "7.0",
+ "sampleRepo": "https://github.com/openshift/cakephp-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/php-70-rhel7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "python",
+ "annotations": {
+ "openshift.io/display-name": "Python"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": "Python (Latest)",
+ "description": "Build and run Python applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.5/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Python available on OpenShift, including major versions updates.",
+ "iconClass": "icon-python",
+ "tags": "builder,python",
+ "supports":"python",
+ "sampleRepo": "https://github.com/openshift/django-ex.git"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "3.5"
+ }
+ },
+ {
+ "name": "3.3",
+ "annotations": {
+ "openshift.io/display-name": "Python 3.3",
+ "description": "Build and run Python 3.3 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.3/README.md.",
+ "iconClass": "icon-python",
+ "tags": "hidden,builder,python",
+ "supports":"python:3.3,python",
+ "version": "3.3",
+ "sampleRepo": "https://github.com/openshift/django-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/openshift3/python-33-rhel7:latest"
+ }
+ },
+ {
+ "name": "2.7",
+ "annotations": {
+ "openshift.io/display-name": "Python 2.7",
+ "description": "Build and run Python 2.7 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/2.7/README.md.",
+ "iconClass": "icon-python",
+ "tags": "builder,python",
+ "supports":"python:2.7,python",
+ "version": "2.7",
+ "sampleRepo": "https://github.com/openshift/django-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/python-27-rhel7:latest"
+ }
+ },
+ {
+ "name": "3.4",
+ "annotations": {
+ "openshift.io/display-name": "Python 3.4",
+ "description": "Build and run Python 3.4 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.4/README.md.",
+ "iconClass": "icon-python",
+ "tags": "builder,python",
+ "supports":"python:3.4,python",
+ "version": "3.4",
+ "sampleRepo": "https://github.com/openshift/django-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/python-34-rhel7:latest"
+ }
+ },
+ {
+ "name": "3.5",
+ "annotations": {
+ "openshift.io/display-name": "Python 3.5",
+ "description": "Build and run Python 3.5 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.5/README.md.",
+ "iconClass": "icon-python",
+ "tags": "builder,python",
+ "supports":"python:3.5,python",
+ "version": "3.5",
+ "sampleRepo": "https://github.com/openshift/django-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/python-35-rhel7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "mysql",
+ "annotations": {
+ "openshift.io/display-name": "MySQL"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": "MySQL (Latest)",
+ "description": "Provides a MySQL database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mysql-container/tree/master/5.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of MySQL available on OpenShift, including major versions updates.",
+ "iconClass": "icon-mysql-database",
+ "tags": "mysql"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "5.7"
+ }
+ },
+ {
+ "name": "5.5",
+ "annotations": {
+ "openshift.io/display-name": "MySQL 5.5",
+ "description": "Provides a MySQL 5.5 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mysql-container/tree/master/5.5/README.md.",
+ "iconClass": "icon-mysql-database",
+ "tags": "hidden,mysql",
+ "version": "5.5"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/openshift3/mysql-55-rhel7:latest"
+ }
+ },
+ {
+ "name": "5.6",
+ "annotations": {
+ "openshift.io/display-name": "MySQL 5.6",
+ "description": "Provides a MySQL 5.6 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mysql-container/tree/master/5.6/README.md.",
+ "iconClass": "icon-mysql-database",
+ "tags": "mysql",
+ "version": "5.6"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/mysql-56-rhel7:latest"
+ }
+ },
+ {
+ "name": "5.7",
+ "annotations": {
+ "openshift.io/display-name": "MySQL 5.7",
+ "description": "Provides a MySQL 5.7 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mysql-container/tree/master/5.7/README.md.",
+ "iconClass": "icon-mysql-database",
+ "tags": "mysql",
+ "version": "5.7"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/mysql-57-rhel7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "mariadb",
+ "annotations": {
+ "openshift.io/display-name": "MariaDB"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": "MariaDB (Latest)",
+ "description": "Provides a MariaDB database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/tree/master/10.1/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of MariaDB available on OpenShift, including major versions updates.",
+ "iconClass": "icon-mariadb",
+ "tags": "mariadb"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "10.1"
+ }
+ },
+ {
+ "name": "10.1",
+ "annotations": {
+ "openshift.io/display-name": "MariaDB 10.1",
+ "description": "Provides a MariaDB 10.1 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/tree/master/10.1/README.md.",
+ "iconClass": "icon-mariadb",
+ "tags": "mariadb",
+ "version": "10.1"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/mariadb-101-rhel7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "postgresql",
+ "annotations": {
+ "openshift.io/display-name": "PostgreSQL"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": "PostgreSQL (Latest)",
+ "description": "Provides a PostgreSQL database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.5.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of PostgreSQL available on OpenShift, including major versions updates.",
+ "iconClass": "icon-postgresql",
+ "tags": "postgresql"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "9.5"
+ }
+ },
+ {
+ "name": "9.2",
+ "annotations": {
+ "openshift.io/display-name": "PostgreSQL 9.2",
+ "description": "Provides a PostgreSQL 9.2 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.2.",
+ "iconClass": "icon-postgresql",
+ "tags": "hidden,postgresql",
+ "version": "9.2"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/openshift3/postgresql-92-rhel7:latest"
+ }
+ },
+ {
+ "name": "9.4",
+ "annotations": {
+ "openshift.io/display-name": "PostgreSQL 9.4",
+ "description": "Provides a PostgreSQL 9.4 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.4.",
+ "iconClass": "icon-postgresql",
+ "tags": "postgresql",
+ "version": "9.4"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/postgresql-94-rhel7:latest"
+ }
+ },
+ {
+ "name": "9.5",
+ "annotations": {
+ "openshift.io/display-name": "PostgreSQL 9.5",
+ "description": "Provides a PostgreSQL 9.5 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.5.",
+ "iconClass": "icon-postgresql",
+ "tags": "postgresql",
+ "version": "9.5"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/postgresql-95-rhel7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "mongodb",
+ "annotations": {
+ "openshift.io/display-name": "MongoDB"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": "MongoDB (Latest)",
+ "description": "Provides a MongoDB database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/3.2/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of MongoDB available on OpenShift, including major versions updates.",
+ "iconClass": "icon-mongodb",
+ "tags": "mongodb"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "3.2"
+ }
+ },
+ {
+ "name": "2.4",
+ "annotations": {
+ "openshift.io/display-name": "MongoDB 2.4",
+ "description": "Provides a MongoDB 2.4 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/2.4/README.md.",
+ "iconClass": "icon-mongodb",
+ "tags": "hidden,mongodb",
+ "version": "2.4"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/openshift3/mongodb-24-rhel7:latest"
+ }
+ },
+ {
+ "name": "2.6",
+ "annotations": {
+ "openshift.io/display-name": "MongoDB 2.6",
+ "description": "Provides a MongoDB 2.6 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/2.6/README.md.",
+ "iconClass": "icon-mongodb",
+ "tags": "mongodb",
+ "version": "2.6"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/mongodb-26-rhel7:latest"
+ }
+ },
+ {
+ "name": "3.2",
+ "annotations": {
+ "openshift.io/display-name": "MongoDB 3.2",
+ "description": "Provides a MongoDB 3.2 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/3.2/README.md.",
+ "iconClass": "icon-mongodb",
+ "tags": "mongodb",
+ "version": "3.2"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/mongodb-32-rhel7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "redis",
+ "annotations": {
+ "openshift.io/display-name": "Redis"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": "Redis (Latest)",
+ "description": "Provides a Redis database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/redis-container/tree/master/3.2/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Redis available on OpenShift, including major versions updates.",
+ "iconClass": "icon-redis",
+ "tags": "redis"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "3.2"
+ }
+ },
+ {
+ "name": "3.2",
+ "annotations": {
+ "openshift.io/display-name": "Redis 3.2",
+ "description": "Provides a Redis 3.2 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/redis-container/tree/master/3.2/README.md.",
+ "iconClass": "icon-redis",
+ "tags": "redis",
+ "version": "3.2"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/redis-32-rhel7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jenkins",
+ "annotations": {
+ "openshift.io/display-name": "Jenkins"
+ }
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "annotations": {
+ "openshift.io/display-name": "Jenkins (Latest)",
+ "description": "Provides a Jenkins server on RHEL 7. For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Jenkins available on OpenShift, including major versions updates.",
+ "iconClass": "icon-jenkins",
+ "tags": "jenkins"
+ },
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "2"
+ }
+ },
+ {
+ "name": "1",
+ "annotations": {
+ "openshift.io/display-name": "Jenkins 1.X",
+ "description": "Provides a Jenkins 1.X server on RHEL 7. For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.",
+ "iconClass": "icon-jenkins",
+ "tags": "jenkins",
+ "version": "1.x"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/openshift3/jenkins-1-rhel7:latest"
+ }
+ },
+ {
+ "name": "2",
+ "annotations": {
+ "openshift.io/display-name": "Jenkins 2.X",
+ "description": "Provides a Jenkins 2.X server on RHEL 7. For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.",
+ "iconClass": "icon-jenkins",
+ "tags": "jenkins",
+ "version": "2.x"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/openshift3/jenkins-2-rhel7:latest"
+ }
+ }
+ ]
+ }
+ }
+ ]
+}
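The ImageStream definitions above map stable tags such as postgresql:9.5, mongodb:3.2, redis:3.2 and jenkins:2 to specific images in the Red Hat registry, while each stream's latest tag is an ImageStreamTag reference that simply follows the newest supported version. A minimal sketch of loading or refreshing these definitions with the oc CLI; the file name and the shared openshift namespace are assumptions (the openshift_examples role normally performs this step):

    # Load the image stream definitions into the shared "openshift" namespace
    # so templates in any project can reference them; fall back to replace if
    # they already exist. The file name is illustrative.
    oc create -f image-streams-rhel7.json -n openshift \
      || oc replace -f image-streams-rhel7.json -n openshift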
diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/README.md b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/README.md
new file mode 100644
index 000000000..62765e03d
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/README.md
@@ -0,0 +1,22 @@
+QuickStarts
+===========
+
+QuickStarts provide the basic skeleton of an application. Generally they
+reference a repository containing very simple source code that implements a
+trivial application using a particular framework. In addition, they define any
+components the application needs, including a build configuration and
+supporting services such as databases.
+
+You can instantiate these templates as-is, or fork the source repository they
+reference and supply your forked repository as the source repository when
+instantiating them.
+
+* [CakePHP](https://raw.githubusercontent.com/openshift/cakephp-ex/master/openshift/templates/cakephp-mysql.json) - Provides a basic CakePHP application with a MySQL database. For more information see the [source repository](https://github.com/openshift/cakephp-ex).
+* [Dancer](https://raw.githubusercontent.com/openshift/dancer-ex/master/openshift/templates/dancer-mysql.json) - Provides a basic Dancer (Perl) application with a MySQL database. For more information see the [source repository](https://github.com/openshift/dancer-ex).
+* [Django](https://raw.githubusercontent.com/openshift/django-ex/master/openshift/templates/django-postgresql.json) - Provides a basic Django (Python) application with a PostgreSQL database. For more information see the [source repository](https://github.com/openshift/django-ex).
+* [NodeJS](https://raw.githubusercontent.com/openshift/nodejs-ex/master/openshift/templates/nodejs-mongodb.json) - Provides a basic NodeJS application with a MongoDB database. For more information see the [source repository](https://github.com/openshift/nodejs-ex).
+* [Rails](https://raw.githubusercontent.com/openshift/rails-ex/master/openshift/templates/rails-postgresql.json) - Provides a basic Rails (Ruby) application with a PostgreSQL database. For more information see the [source repository](https://github.com/openshift/rails-ex).
+
+Note: This file is processed by `hack/update-external-examples.sh`. New examples
+must follow the exact syntax of the existing entries. Files in this directory
+are automatically pulled down; do not modify or add files to this directory.
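As the README above notes, a quickstart can be instantiated straight from its raw template URL, optionally pointing it at a forked copy of the example source. A minimal sketch with oc new-app, using the CakePHP quickstart and a placeholder fork URL:

    # Instantiate the CakePHP quickstart against your own fork of the example
    # source; replace <your-user> with the fork's owner.
    oc new-app \
      -f https://raw.githubusercontent.com/openshift/cakephp-ex/master/openshift/templates/cakephp-mysql.json \
      -p SOURCE_REPOSITORY_URL=https://github.com/<your-user>/cakephp-ex.git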
diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/apicast-gateway-template.yml b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/apicast-gateway-template.yml
new file mode 100644
index 000000000..34f5fcbcc
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/apicast-gateway-template.yml
@@ -0,0 +1,149 @@
+apiVersion: v1
+kind: Template
+metadata:
+ creationTimestamp: null
+ name: 3scale-gateway
+ annotations:
+ description: "3scale API Gateway"
+ iconClass: "icon-load-balancer"
+ tags: "api,gateway,3scale"
+objects:
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: ${THREESCALE_GATEWAY_NAME}
+ spec:
+ replicas: 2
+ selector:
+ deploymentconfig: ${THREESCALE_GATEWAY_NAME}
+ strategy:
+ resources: {}
+ rollingParams:
+ intervalSeconds: 1
+ maxSurge: 25%
+ maxUnavailable: 25%
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ deploymentconfig: ${THREESCALE_GATEWAY_NAME}
+ spec:
+ containers:
+ - env:
+ - name: THREESCALE_PORTAL_ENDPOINT
+ valueFrom:
+ secretKeyRef:
+ name: ${THREESCALE_PORTAL_ENDPOINT_SECRET}
+ key: password
+ - name: THREESCALE_CONFIG_FILE
+ value: ${THREESCALE_CONFIG_FILE}
+ - name: RESOLVER
+ value: ${RESOLVER}
+ - name: APICAST_SERVICES
+ value: ${APICAST_SERVICES}
+ - name: APICAST_MISSING_CONFIGURATION
+ value: ${MISSING_CONFIGURATION}
+ - name: APICAST_LOG_LEVEL
+ value: ${APICAST_LOG_LEVEL}
+ - name: APICAST_PATH_ROUTING_ENABLED
+ value: ${PATH_ROUTING}
+ - name: APICAST_RESPONSE_CODES
+ value: ${RESPONSE_CODES}
+ - name: APICAST_REQUEST_LOGS
+ value: ${REQUEST_LOGS}
+ - name: APICAST_RELOAD_CONFIG
+ value: ${APICAST_RELOAD_CONFIG}
+ image: ${THREESCALE_GATEWAY_IMAGE}
+ imagePullPolicy: Always
+ name: ${THREESCALE_GATEWAY_NAME}
+ livenessProbe:
+ httpGet:
+ path: /status/live
+ port: 8090
+ initialDelaySeconds: 10
+ timeoutSeconds: 1
+ readinessProbe:
+ httpGet:
+ path: /status/ready
+ port: 8090
+ initialDelaySeconds: 15
+ timeoutSeconds: 1
+ ports:
+ - containerPort: 8080
+ protocol: TCP
+ resources: {}
+ terminationMessagePath: /dev/termination-log
+ dnsPolicy: ClusterFirst
+ restartPolicy: Always
+ securityContext: {}
+ terminationGracePeriodSeconds: 30
+ triggers:
+ - type: ConfigChange
+ status: {}
+- apiVersion: v1
+ kind: Service
+ metadata:
+ creationTimestamp: null
+ name: ${THREESCALE_GATEWAY_NAME}
+ spec:
+ ports:
+ - name: 8080-tcp
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ selector:
+ deploymentconfig: ${THREESCALE_GATEWAY_NAME}
+ sessionAffinity: None
+ type: ClusterIP
+ status:
+ loadBalancer: {}
+parameters:
+- description: "Name of the secret containing the THREESCALE_PORTAL_ENDPOINT with the access-token or provider key"
+ value: threescale-portal-endpoint-secret
+ name: THREESCALE_PORTAL_ENDPOINT_SECRET
+ required: true
+- description: "Path to saved JSON file with configuration for the gateway. Has to be injected to the docker image as read only volume."
+ value:
+ name: THREESCALE_CONFIG_FILE
+ required: false
+- description: "Name for the 3scale API Gateway"
+ value: threescalegw
+ name: THREESCALE_GATEWAY_NAME
+ required: true
+- description: "Docker image to use."
+ value: 'rhamp10/apicast-gateway:1.0.0-4'
+ name: THREESCALE_GATEWAY_IMAGE
+ required: true
+- description: "DNS Resolver for openresty, if empty it will be autodiscovered"
+ value:
+ name: RESOLVER
+ required: false
+- description: "Subset of services to run. Use comma separated list of service ids (eg. 42,1337)"
+ value:
+ name: APICAST_SERVICES
+ required: false
+- description: "What to do on missing or invalid configuration. Allowed values are: log, exit."
+ value: exit
+ required: false
+ name: MISSING_CONFIGURATION
+- description: "Log level. One of the following: debug, info, notice, warn, error, crit, alert, or emerg."
+ name: APICAST_LOG_LEVEL
+ required: false
+- description: "Enable path routing. Experimental feature."
+ name: PATH_ROUTING
+ required: false
+ value: "false"
+- description: "Enable traffic logging to 3scale. Includes whole request and response."
+ value: "false"
+ name: REQUEST_LOGS
+ required: false
+- description: "Enable logging response codes to 3scale."
+ value: "false"
+ name: RESPONSE_CODES
+ required: false
+- description: "Reload config on every request"
+ value: "false"
+ name: APICAST_RELOAD_CONFIG
+ required: false
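The gateway template above reads THREESCALE_PORTAL_ENDPOINT from the password key of the secret named by THREESCALE_PORTAL_ENDPOINT_SECRET, so that secret has to exist before the deployment can start. A minimal sketch, assuming the default secret name and a placeholder 3scale admin-portal URL:

    # Create the secret the gateway pulls its portal endpoint from; the URL is
    # a placeholder for an access token and admin portal of your 3scale tenant.
    oc create secret generic threescale-portal-endpoint-secret \
      --from-literal=password=https://<ACCESS_TOKEN>@<TENANT>-admin.3scale.net

    # Instantiate the gateway in the current project with its default name.
    oc new-app -f apicast-gateway-template.yml -p THREESCALE_GATEWAY_NAME=threescalegw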
diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/cakephp-mysql.json b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/cakephp-mysql.json
new file mode 100644
index 000000000..9dbbf89d1
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/cakephp-mysql.json
@@ -0,0 +1,531 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "cakephp-mysql-example",
+ "annotations": {
+ "openshift.io/display-name": "CakePHP + MySQL (Ephemeral)",
+ "description": "An example CakePHP application with a MySQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/cakephp-ex/blob/master/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing.",
+ "tags": "quickstart,php,cakephp",
+ "iconClass": "icon-php"
+ }
+ },
+ "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/cake-ex/blob/master/README.md.",
+ "labels": {
+ "template": "cakephp-mysql-example"
+ },
+ "objects": [
+ {
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "stringData" : {
+ "databaseUser" : "${DATABASE_USER}",
+ "databasePassword" : "${DATABASE_PASSWORD}"
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Exposes and load balances the application pods",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${DATABASE_SERVICE_NAME}\", \"kind\": \"Service\"}]"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "web",
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "kind": "Service",
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Keeps track of changes in the application image"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to build the application"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${NAMESPACE}",
+ "name": "php:5.6"
+ },
+ "env": [
+ {
+ "name": "COMPOSER_MIRROR",
+ "value": "${COMPOSER_MIRROR}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange"
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to deploy the application server"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate",
+ "recreateParams": {
+ "pre": {
+ "failurePolicy": "Retry",
+ "execNewPod": {
+ "command": [
+ "./migrate-database.sh"
+ ],
+ "containerName": "cakephp-mysql-example"
+ }
+ }
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "cakephp-mysql-example"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${NAME}",
+ "labels": {
+ "name": "${NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "cakephp-mysql-example",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 8080
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 3,
+ "httpGet": {
+ "path": "/health.php",
+ "port": 8080
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 30,
+ "httpGet": {
+ "path": "/",
+ "port": 8080
+ }
+ },
+ "env": [
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "value": "${DATABASE_SERVICE_NAME}"
+ },
+ {
+ "name": "DATABASE_ENGINE",
+ "value": "${DATABASE_ENGINE}"
+ },
+ {
+ "name": "DATABASE_NAME",
+ "value": "${DATABASE_NAME}"
+ },
+ {
+ "name": "DATABASE_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databaseUser"
+ }
+ }
+ },
+ {
+ "name": "DATABASE_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databasePassword"
+ }
+ }
+ },
+ {
+ "name": "CAKEPHP_SECRET_TOKEN",
+ "value": "${CAKEPHP_SECRET_TOKEN}"
+ },
+ {
+ "name": "CAKEPHP_SECURITY_SALT",
+ "value": "${CAKEPHP_SECURITY_SALT}"
+ },
+ {
+ "name": "CAKEPHP_SECURITY_CIPHER_SEED",
+ "value": "${CAKEPHP_SECURITY_CIPHER_SEED}"
+ },
+ {
+ "name": "OPCACHE_REVALIDATE_FREQ",
+ "value": "${OPCACHE_REVALIDATE_FREQ}"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "description": "Exposes the database server"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "mysql",
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "description": "Defines how to deploy the database"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${NAMESPACE}",
+ "name": "mysql:5.6"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "labels": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "volumes": [
+ {
+ "name": "data",
+ "emptyDir": {}
+ }
+ ],
+ "containers": [
+ {
+ "name": "mysql",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 3306
+ }
+ ],
+ "volumeMounts": [
+ {
+ "name": "data",
+ "mountPath": "/var/lib/mysql/data"
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 5,
+ "exec": {
+ "command": [ "/bin/sh", "-i", "-c", "MYSQL_PWD='${DATABASE_PASSWORD}' mysql -h 127.0.0.1 -u ${DATABASE_USER} -D ${DATABASE_NAME} -e 'SELECT 1'" ]
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 30,
+ "tcpSocket": {
+ "port": 3306
+ }
+ },
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databaseUser"
+ }
+ }
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databasePassword"
+ }
+ }
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DATABASE_NAME}"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_MYSQL_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ],
+ "parameters": [
+ {
+ "name": "NAME",
+ "displayName": "Name",
+ "description": "The name assigned to all of the frontend objects defined in this template.",
+ "required": true,
+ "value": "cakephp-mysql-example"
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Namespace",
+ "description": "The OpenShift Namespace where the ImageStream resides.",
+ "required": true,
+ "value": "openshift"
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "description": "Maximum amount of memory the CakePHP container can use.",
+ "required": true,
+ "value": "512Mi"
+ },
+ {
+ "name": "MEMORY_MYSQL_LIMIT",
+ "displayName": "Memory Limit (MySQL)",
+ "description": "Maximum amount of memory the MySQL container can use.",
+ "required": true,
+ "value": "512Mi"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_URL",
+ "displayName": "Git Repository URL",
+ "description": "The URL of the repository with your application source code.",
+ "required": true,
+ "value": "https://github.com/openshift/cakephp-ex.git"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_REF",
+ "displayName": "Git Reference",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "CONTEXT_DIR",
+ "displayName": "Context Directory",
+ "description": "Set this to the relative path to your project if it is not in the root of your repository."
+ },
+ {
+ "name": "APPLICATION_DOMAIN",
+ "displayName": "Application Hostname",
+ "description": "The exposed hostname that will route to the CakePHP service, if left blank a value will be defaulted.",
+ "value": ""
+ },
+ {
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "displayName": "GitHub Webhook Secret",
+ "description": "A secret string used to configure the GitHub webhook.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "displayName": "Database Service Name",
+ "required": true,
+ "value": "mysql"
+ },
+ {
+ "name": "DATABASE_ENGINE",
+ "displayName": "Database Engine",
+ "description": "Database engine: postgresql, mysql or sqlite (default).",
+ "required": true,
+ "value": "mysql"
+ },
+ {
+ "name": "DATABASE_NAME",
+ "displayName": "Database Name",
+ "required": true,
+ "value": "default"
+ },
+ {
+ "name": "DATABASE_USER",
+ "displayName": "Database User",
+ "required": true,
+ "value": "cakephp"
+ },
+ {
+ "name": "DATABASE_PASSWORD",
+ "displayName": "Database Password",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}"
+ },
+ {
+ "name": "CAKEPHP_SECRET_TOKEN",
+ "displayName": "CakePHP secret token",
+ "description": "Set this to a long random string.",
+ "generate": "expression",
+ "from": "[\\w]{50}"
+ },
+ {
+ "name": "CAKEPHP_SECURITY_SALT",
+ "displayName": "CakePHP Security Salt",
+ "description": "Security salt for session hash.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CAKEPHP_SECURITY_CIPHER_SEED",
+ "displayName": "CakePHP Security Cipher Seed",
+ "description": "Security cipher seed for session hash.",
+ "generate": "expression",
+ "from": "[0-9]{30}"
+ },
+ {
+ "name": "OPCACHE_REVALIDATE_FREQ",
+ "displayName": "OPcache Revalidation Frequency",
+ "description": "How often to check script timestamps for updates, in seconds. 0 will result in OPcache checking for updates on every request.",
+ "value": "2"
+ },
+ {
+ "name": "COMPOSER_MIRROR",
+ "displayName": "Custom Composer Mirror URL",
+ "description": "The custom Composer mirror URL",
+ "value": ""
+ }
+ ]
+}
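Every parameter in the template above either has a default or, when marked with "generate": "expression" (webhook secret, database password, CakePHP salts), is filled with a random string matching the given pattern at processing time. A quick way to review and override them, sketched with oc; the local file name is an assumption:

    # List every parameter the template accepts, with defaults and descriptions.
    oc process --parameters -f cakephp-mysql.json

    # Instantiate it with an explicit database name; generated values you do
    # not set (passwords, salts, webhook secrets) are still randomized for you.
    oc new-app -f cakephp-mysql.json -p DATABASE_NAME=appdb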
diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dancer-mysql.json b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dancer-mysql.json
new file mode 100644
index 000000000..dccb8bf7f
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/dancer-mysql.json
@@ -0,0 +1,487 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "dancer-mysql-example",
+ "annotations": {
+ "openshift.io/display-name": "Dancer + MySQL (Ephemeral)",
+ "description": "An example Dancer application with a MySQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing.",
+ "tags": "quickstart,perl,dancer",
+ "iconClass": "icon-perl"
+ }
+ },
+ "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.",
+ "labels": {
+ "template": "dancer-mysql-example"
+ },
+ "objects": [
+ {
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "stringData" : {
+ "databaseUser" : "${DATABASE_USER}",
+ "databasePassword" : "${DATABASE_PASSWORD}"
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Exposes and load balances the application pods",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${DATABASE_SERVICE_NAME}\", \"kind\": \"Service\"}]"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "web",
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "kind": "Service",
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Keeps track of changes in the application image"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to build the application"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${NAMESPACE}",
+ "name": "perl:5.20"
+ },
+ "env": [
+ {
+ "name": "CPAN_MIRROR",
+ "value": "${CPAN_MIRROR}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange"
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ }
+ ],
+ "postCommit": {
+ "script": "perl -I extlib/lib/perl5 -I lib t/*"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to deploy the application server"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "dancer-mysql-example"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${NAME}",
+ "labels": {
+ "name": "${NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "dancer-mysql-example",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 8080
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 3,
+ "httpGet": {
+ "path": "/health",
+ "port": 8080
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 30,
+ "httpGet": {
+ "path": "/",
+ "port": 8080
+ }
+ },
+ "env": [
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "value": "${DATABASE_SERVICE_NAME}"
+ },
+ {
+ "name": "MYSQL_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databaseUser"
+ }
+ }
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databasePassword"
+ }
+ }
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DATABASE_NAME}"
+ },
+ {
+ "name": "SECRET_KEY_BASE",
+ "value": "${SECRET_KEY_BASE}"
+ },
+ {
+ "name": "PERL_APACHE2_RELOAD",
+ "value": "${PERL_APACHE2_RELOAD}"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "description": "Exposes the database server"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "mysql",
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "description": "Defines how to deploy the database"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${NAMESPACE}",
+ "name": "mysql:5.6"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "labels": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "volumes": [
+ {
+ "name": "data",
+ "emptyDir": {}
+ }
+ ],
+ "containers": [
+ {
+ "name": "mysql",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 3306
+ }
+ ],
+ "volumeMounts": [
+ {
+ "name": "data",
+ "mountPath": "/var/lib/mysql/data"
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 5,
+ "exec": {
+ "command": [ "/bin/sh", "-i", "-c", "MYSQL_PWD='${DATABASE_PASSWORD}' mysql -h 127.0.0.1 -u ${DATABASE_USER} -D ${DATABASE_NAME} -e 'SELECT 1'" ]
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 30,
+ "tcpSocket": {
+ "port": 3306
+ }
+ },
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databaseUser"
+ }
+ }
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databasePassword"
+ }
+ }
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DATABASE_NAME}"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_MYSQL_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ],
+ "parameters": [
+ {
+ "name": "NAME",
+ "displayName": "Name",
+ "description": "The name assigned to all of the frontend objects defined in this template.",
+ "required": true,
+ "value": "dancer-mysql-example"
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Namespace",
+ "description": "The OpenShift Namespace where the ImageStream resides.",
+ "required": true,
+ "value": "openshift"
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "description": "Maximum amount of memory the Perl Dancer container can use.",
+ "required": true,
+ "value": "512Mi"
+ },
+ {
+ "name": "MEMORY_MYSQL_LIMIT",
+ "displayName": "Memory Limit (MySQL)",
+ "description": "Maximum amount of memory the MySQL container can use.",
+ "required": true,
+ "value": "512Mi"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_URL",
+ "displayName": "Git Repository URL",
+ "description": "The URL of the repository with your application source code.",
+ "required": true,
+ "value": "https://github.com/openshift/dancer-ex.git"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_REF",
+ "displayName": "Git Reference",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "CONTEXT_DIR",
+ "displayName": "Context Directory",
+ "description": "Set this to the relative path to your project if it is not in the root of your repository."
+ },
+ {
+ "name": "APPLICATION_DOMAIN",
+ "displayName": "Application Hostname",
+ "description": "The exposed hostname that will route to the Dancer service, if left blank a value will be defaulted.",
+ "value": ""
+ },
+ {
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "displayName": "GitHub Webhook Secret",
+ "description": "A secret string used to configure the GitHub webhook.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "displayName": "Database Service Name",
+ "required": true,
+ "value": "database"
+ },
+ {
+ "name": "DATABASE_USER",
+ "displayName": "Database Username",
+ "generate": "expression",
+ "from": "user[A-Z0-9]{3}"
+ },
+ {
+ "name": "DATABASE_PASSWORD",
+ "displayName": "Database Password",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{8}"
+ },
+ {
+ "name": "DATABASE_NAME",
+ "displayName": "Database Name",
+ "required": true,
+ "value": "sampledb"
+ },
+ {
+ "name": "PERL_APACHE2_RELOAD",
+ "displayName": "Perl Module Reload",
+ "description": "Set this to \"true\" to enable automatic reloading of modified Perl modules.",
+ "value": ""
+ },
+ {
+ "name": "SECRET_KEY_BASE",
+ "displayName": "Secret Key",
+ "description": "Your secret key for verifying the integrity of signed cookies.",
+ "generate": "expression",
+ "from": "[a-z0-9]{127}"
+ },
+ {
+ "name": "CPAN_MIRROR",
+ "displayName": "Custom CPAN Mirror URL",
+ "description": "The custom CPAN mirror URL",
+ "value": ""
+ }
+ ]
+}
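Besides the image-change, config-change and GitHub webhook triggers, the Dancer BuildConfig above defines a postCommit hook (perl -I extlib/lib/perl5 -I lib t/*) that runs the test suite in a throwaway container after every successful build; if the hook fails, the build fails and no image is pushed. A build can also be started by hand, sketched here under the template's default NAME:

    # Trigger a build manually and stream its logs; the GitHub webhook does the
    # same automatically once GITHUB_WEBHOOK_SECRET is configured on the repo.
    oc start-build dancer-mysql-example --follow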
diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/django-postgresql.json b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/django-postgresql.json
new file mode 100644
index 000000000..59ff8a988
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/django-postgresql.json
@@ -0,0 +1,500 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "django-psql-example",
+ "annotations": {
+ "openshift.io/display-name": "Django + PostgreSQL (Ephemeral)",
+ "description": "An example Django application with a PostgreSQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing.",
+ "tags": "quickstart,python,django",
+ "iconClass": "icon-python"
+ }
+ },
+ "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.",
+ "labels": {
+ "template": "django-psql-example"
+ },
+ "objects": [
+ {
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "stringData" : {
+ "databaseUser" : "${DATABASE_USER}",
+ "databasePassword" : "${DATABASE_PASSWORD}"
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Exposes and load balances the application pods",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${DATABASE_SERVICE_NAME}\", \"kind\": \"Service\"}]"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "web",
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "kind": "Service",
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Keeps track of changes in the application image"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to build the application"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${NAMESPACE}",
+ "name": "python:3.5"
+ },
+ "env": [
+ {
+ "name": "PIP_INDEX_URL",
+ "value": "${PIP_INDEX_URL}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange"
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ }
+ ],
+ "postCommit": {
+ "script": "./manage.py test"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to deploy the application server"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Rolling"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "django-psql-example"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${NAME}",
+ "labels": {
+ "name": "${NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "django-psql-example",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 8080
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 3,
+ "httpGet": {
+ "path": "/health",
+ "port": 8080
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 30,
+ "httpGet": {
+ "path": "/health",
+ "port": 8080
+ }
+ },
+ "env": [
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "value": "${DATABASE_SERVICE_NAME}"
+ },
+ {
+ "name": "DATABASE_ENGINE",
+ "value": "${DATABASE_ENGINE}"
+ },
+ {
+ "name": "DATABASE_NAME",
+ "value": "${DATABASE_NAME}"
+ },
+ {
+ "name": "DATABASE_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databaseUser"
+ }
+ }
+ },
+ {
+ "name": "DATABASE_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databasePassword"
+ }
+ }
+ },
+ {
+ "name": "APP_CONFIG",
+ "value": "${APP_CONFIG}"
+ },
+ {
+ "name": "DJANGO_SECRET_KEY",
+ "value": "${DJANGO_SECRET_KEY}"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "description": "Exposes the database server"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "postgresql",
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "description": "Defines how to deploy the database"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${NAMESPACE}",
+ "name": "postgresql:9.5"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "labels": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "volumes": [
+ {
+ "name": "data",
+ "emptyDir": {}
+ }
+ ],
+ "containers": [
+ {
+ "name": "postgresql",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 5432
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databaseUser"
+ }
+ }
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databasePassword"
+ }
+ }
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DATABASE_NAME}"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "name": "data",
+ "mountPath": "/var/lib/pgsql/data"
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 5,
+ "exec": {
+ "command": [ "/bin/sh", "-i", "-c", "psql -h 127.0.0.1 -U ${POSTGRESQL_USER} -q -d ${POSTGRESQL_DATABASE} -c 'SELECT 1'"]
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 30,
+ "tcpSocket": {
+ "port": 5432
+ }
+ },
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_POSTGRESQL_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ],
+ "parameters": [
+ {
+ "name": "NAME",
+ "displayName": "Name",
+ "description": "The name assigned to all of the frontend objects defined in this template.",
+ "required": true,
+ "value": "django-psql-example"
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Namespace",
+ "required": true,
+ "description": "The OpenShift Namespace where the ImageStream resides.",
+ "value": "openshift"
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "required": true,
+ "description": "Maximum amount of memory the Django container can use.",
+ "value": "512Mi"
+ },
+ {
+ "name": "MEMORY_POSTGRESQL_LIMIT",
+ "displayName": "Memory Limit (PostgreSQL)",
+ "required": true,
+ "description": "Maximum amount of memory the PostgreSQL container can use.",
+ "value": "512Mi"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_URL",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "description": "The URL of the repository with your application source code.",
+ "value": "https://github.com/openshift/django-ex.git"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_REF",
+ "displayName": "Git Reference",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "CONTEXT_DIR",
+ "displayName": "Context Directory",
+ "description": "Set this to the relative path to your project if it is not in the root of your repository."
+ },
+ {
+ "name": "APPLICATION_DOMAIN",
+ "displayName": "Application Hostname",
+ "description": "The exposed hostname that will route to the Django service, if left blank a value will be defaulted.",
+ "value": ""
+ },
+ {
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "displayName": "GitHub Webhook Secret",
+ "description": "A secret string used to configure the GitHub webhook.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "displayName": "Database Service Name",
+ "required": true,
+ "value": "postgresql"
+ },
+ {
+ "name": "DATABASE_ENGINE",
+ "displayName": "Database Engine",
+ "required": true,
+ "description": "Database engine: postgresql, mysql or sqlite (default).",
+ "value": "postgresql"
+ },
+ {
+ "name": "DATABASE_NAME",
+ "displayName": "Database Name",
+ "required": true,
+ "value": "default"
+ },
+ {
+ "name": "DATABASE_USER",
+ "displayName": "Database Username",
+ "required": true,
+ "value": "django"
+ },
+ {
+ "name": "DATABASE_PASSWORD",
+ "displayName": "Database User Password",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}"
+ },
+ {
+ "name": "APP_CONFIG",
+ "displayName": "Application Configuration File Path",
+ "description": "Relative path to Gunicorn configuration file (optional)."
+ },
+ {
+ "name": "DJANGO_SECRET_KEY",
+ "displayName": "Django Secret Key",
+ "description": "Set this to a long random string.",
+ "generate": "expression",
+ "from": "[\\w]{50}"
+ },
+ {
+ "name": "PIP_INDEX_URL",
+ "displayName": "Custom PyPi Index URL",
+ "description": "The custom PyPi index URL",
+ "value": ""
+ }
+ ]
+}
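A detail worth noting in the Django template above: the frontend and the database containers read their credentials from the same Secret (${NAME}), injected as DATABASE_USER/DATABASE_PASSWORD and POSTGRESQL_USER/POSTGRESQL_PASSWORD respectively, so the two always agree even when the password is generated. A small sketch for inspecting what was generated, assuming the default NAME of django-psql-example:

    # Show the generated credentials shared by the app and the database.
    oc get secret django-psql-example -o yaml

    # List the environment actually wired into the frontend deployment.
    oc set env dc/django-psql-example --list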
diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/jenkins-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/jenkins-ephemeral-template.json
new file mode 100644
index 000000000..62ccc5b7f
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/jenkins-ephemeral-template.json
@@ -0,0 +1,275 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jenkins-ephemeral",
+ "creationTimestamp": null,
+ "annotations": {
+ "openshift.io/display-name": "Jenkins (Ephemeral)",
+ "description": "Jenkins service, without persistent storage.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing.",
+ "iconClass": "icon-jenkins",
+ "tags": "instant-app,jenkins"
+ }
+ },
+ "message": "A Jenkins service has been created in your project. Log into Jenkins with your OpenShift account. The tutorial at https://github.com/openshift/origin/blob/master/examples/jenkins/README.md contains more information about using this template.",
+ "objects": [
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${JENKINS_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "to": {
+ "kind": "Service",
+ "name": "${JENKINS_SERVICE_NAME}"
+ },
+ "tls": {
+ "termination": "edge",
+ "insecureEdgeTerminationPolicy": "Redirect"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${JENKINS_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "jenkins"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${JENKINS_IMAGE_STREAM_TAG}",
+ "namespace": "${NAMESPACE}"
+ },
+ "lastTriggeredImage": ""
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${JENKINS_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "name": "${JENKINS_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${JENKINS_SERVICE_NAME}",
+ "containers": [
+ {
+ "name": "jenkins",
+ "image": " ",
+ "readinessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 3,
+ "httpGet": {
+ "path": "/login",
+ "port": 8080
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 420,
+ "failureThreshold" : 30,
+ "httpGet": {
+ "path": "/login",
+ "port": 8080
+ }
+ },
+ "env": [
+ {
+ "name": "OPENSHIFT_ENABLE_OAUTH",
+ "value": "${ENABLE_OAUTH}"
+ },
+ {
+ "name": "OPENSHIFT_ENABLE_REDIRECT_PROMPT",
+ "value": "true"
+ },
+ {
+ "name": "KUBERNETES_MASTER",
+ "value": "https://kubernetes.default:443"
+ },
+ {
+ "name": "KUBERNETES_TRUST_CERTIFICATES",
+ "value": "true"
+ },
+ {
+ "name": "JNLP_SERVICE_NAME",
+ "value": "${JNLP_SERVICE_NAME}"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "${JENKINS_SERVICE_NAME}-data",
+ "mountPath": "/var/lib/jenkins"
+ }
+ ],
+ "terminationMessagePath": "/dev/termination-log",
+ "imagePullPolicy": "IfNotPresent",
+ "capabilities": {},
+ "securityContext": {
+ "capabilities": {},
+ "privileged": false
+ }
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${JENKINS_SERVICE_NAME}-data",
+ "emptyDir": {
+ "medium": ""
+ }
+ }
+ ],
+ "restartPolicy": "Always",
+ "dnsPolicy": "ClusterFirst"
+ }
+ }
+ }
+ },
+ {
+ "kind": "ServiceAccount",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${JENKINS_SERVICE_NAME}",
+ "annotations": {
+ "serviceaccounts.openshift.io/oauth-redirectreference.jenkins": "{\"kind\":\"OAuthRedirectReference\",\"apiVersion\":\"v1\",\"reference\":{\"kind\":\"Route\",\"name\":\"${JENKINS_SERVICE_NAME}\"}}"
+ }
+ }
+ },
+ {
+ "kind": "RoleBinding",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${JENKINS_SERVICE_NAME}_edit"
+ },
+ "groupNames": null,
+ "subjects": [
+ {
+ "kind": "ServiceAccount",
+ "name": "${JENKINS_SERVICE_NAME}"
+ }
+ ],
+ "roleRef": {
+ "name": "edit"
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${JNLP_SERVICE_NAME}"
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "agent",
+ "protocol": "TCP",
+ "port": 50000,
+ "targetPort": 50000,
+ "nodePort": 0
+ }
+ ],
+ "selector": {
+ "name": "${JENKINS_SERVICE_NAME}"
+ },
+ "type": "ClusterIP",
+ "sessionAffinity": "None"
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${JENKINS_SERVICE_NAME}",
+ "annotations": {
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${JNLP_SERVICE_NAME}\", \"namespace\": \"\", \"kind\": \"Service\"}]",
+ "service.openshift.io/infrastructure": "true"
+ },
+ "creationTimestamp": null
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "web",
+ "protocol": "TCP",
+ "port": 80,
+ "targetPort": 8080,
+ "nodePort": 0
+ }
+ ],
+ "selector": {
+ "name": "${JENKINS_SERVICE_NAME}"
+ },
+ "type": "ClusterIP",
+ "sessionAffinity": "None"
+ }
+ }
+ ],
+ "parameters": [
+ {
+ "name": "JENKINS_SERVICE_NAME",
+ "displayName": "Jenkins Service Name",
+ "description": "The name of the OpenShift Service exposed for the Jenkins container.",
+ "value": "jenkins"
+ },
+ {
+ "name": "JNLP_SERVICE_NAME",
+ "displayName": "Jenkins JNLP Service Name",
+ "description": "The name of the service used for master/slave communication.",
+ "value": "jenkins-jnlp"
+ },
+ {
+ "name": "ENABLE_OAUTH",
+ "displayName": "Enable OAuth in Jenkins",
+ "description": "Whether to enable OAuth OpenShift integration. If false, the static account 'admin' will be initialized with the password 'password'.",
+ "value": "true"
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "description": "Maximum amount of memory the container can use.",
+ "value": "512Mi"
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Jenkins ImageStream Namespace",
+ "description": "The OpenShift Namespace where the Jenkins ImageStream resides.",
+ "value": "openshift"
+ },
+ {
+ "name": "JENKINS_IMAGE_STREAM_TAG",
+ "displayName": "Jenkins ImageStreamTag",
+ "description": "Name of the ImageStreamTag to be used for the Jenkins image.",
+ "value": "jenkins:latest"
+ }
+ ],
+ "labels": {
+ "template": "jenkins-ephemeral-template"
+ }
+}
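Two objects in the Jenkins template above do the OpenShift integration work: the ServiceAccount carries the oauth-redirectreference annotation that lets Jenkins log users in with their OpenShift accounts, and the RoleBinding grants that service account edit on the project so pipelines can create builds and deployments. A minimal sketch, assuming the template has been loaded into the shared openshift namespace (as the openshift_examples role does):

    # Instantiate the ephemeral variant; Jenkins data is lost with the pod.
    oc new-app --template=jenkins-ephemeral

    # Equivalent of the template's RoleBinding, if it ever needs recreating.
    oc policy add-role-to-user edit -z jenkins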
diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/jenkins-persistent-template.json b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/jenkins-persistent-template.json
new file mode 100644
index 000000000..50c4ad566
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/jenkins-persistent-template.json
@@ -0,0 +1,299 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jenkins-persistent",
+ "creationTimestamp": null,
+ "annotations": {
+ "openshift.io/display-name": "Jenkins (Persistent)",
+ "description": "Jenkins service, with persistent storage.\n\nNOTE: You must have persistent volumes available in your cluster to use this template.",
+ "iconClass": "icon-jenkins",
+ "tags": "instant-app,jenkins"
+ }
+ },
+ "message": "A Jenkins service has been created in your project. Log into Jenkins with your OpenShift account. The tutorial at https://github.com/openshift/origin/blob/master/examples/jenkins/README.md contains more information about using this template.",
+ "objects": [
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${JENKINS_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "to": {
+ "kind": "Service",
+ "name": "${JENKINS_SERVICE_NAME}"
+ },
+ "tls": {
+ "termination": "edge",
+ "insecureEdgeTerminationPolicy": "Redirect"
+ }
+ }
+ },
+ {
+ "kind": "PersistentVolumeClaim",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${JENKINS_SERVICE_NAME}"
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${JENKINS_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "jenkins"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${JENKINS_IMAGE_STREAM_TAG}",
+ "namespace": "${NAMESPACE}"
+ },
+ "lastTriggeredImage": ""
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${JENKINS_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "name": "${JENKINS_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${JENKINS_SERVICE_NAME}",
+ "containers": [
+ {
+ "name": "jenkins",
+ "image": " ",
+ "readinessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 3,
+ "httpGet": {
+ "path": "/login",
+ "port": 8080
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 420,
+ "failureThreshold" : 30,
+ "httpGet": {
+ "path": "/login",
+ "port": 8080
+ }
+ },
+ "env": [
+ {
+ "name": "OPENSHIFT_ENABLE_OAUTH",
+ "value": "${ENABLE_OAUTH}"
+ },
+ {
+ "name": "OPENSHIFT_ENABLE_REDIRECT_PROMPT",
+ "value": "true"
+ },
+ {
+ "name": "KUBERNETES_MASTER",
+ "value": "https://kubernetes.default:443"
+ },
+ {
+ "name": "KUBERNETES_TRUST_CERTIFICATES",
+ "value": "true"
+ },
+ {
+ "name": "JNLP_SERVICE_NAME",
+ "value": "${JNLP_SERVICE_NAME}"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "${JENKINS_SERVICE_NAME}-data",
+ "mountPath": "/var/lib/jenkins"
+ }
+ ],
+ "terminationMessagePath": "/dev/termination-log",
+ "imagePullPolicy": "IfNotPresent",
+ "capabilities": {},
+ "securityContext": {
+ "capabilities": {},
+ "privileged": false
+ }
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${JENKINS_SERVICE_NAME}-data",
+ "persistentVolumeClaim": {
+ "claimName": "${JENKINS_SERVICE_NAME}"
+ }
+ }
+ ],
+ "restartPolicy": "Always",
+ "dnsPolicy": "ClusterFirst"
+ }
+ }
+ }
+ },
+ {
+ "kind": "ServiceAccount",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${JENKINS_SERVICE_NAME}",
+ "annotations": {
+ "serviceaccounts.openshift.io/oauth-redirectreference.jenkins": "{\"kind\":\"OAuthRedirectReference\",\"apiVersion\":\"v1\",\"reference\":{\"kind\":\"Route\",\"name\":\"${JENKINS_SERVICE_NAME}\"}}"
+ }
+ }
+ },
+ {
+ "kind": "RoleBinding",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${JENKINS_SERVICE_NAME}_edit"
+ },
+ "groupNames": null,
+ "subjects": [
+ {
+ "kind": "ServiceAccount",
+ "name": "${JENKINS_SERVICE_NAME}"
+ }
+ ],
+ "roleRef": {
+ "name": "edit"
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${JNLP_SERVICE_NAME}"
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "agent",
+ "protocol": "TCP",
+ "port": 50000,
+ "targetPort": 50000,
+ "nodePort": 0
+ }
+ ],
+ "selector": {
+ "name": "${JENKINS_SERVICE_NAME}"
+ },
+ "type": "ClusterIP",
+ "sessionAffinity": "None"
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${JENKINS_SERVICE_NAME}",
+ "annotations": {
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${JNLP_SERVICE_NAME}\", \"namespace\": \"\", \"kind\": \"Service\"}]",
+ "service.openshift.io/infrastructure": "true"
+ },
+ "creationTimestamp": null
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "web",
+ "protocol": "TCP",
+ "port": 80,
+ "targetPort": 8080,
+ "nodePort": 0
+ }
+ ],
+ "selector": {
+ "name": "${JENKINS_SERVICE_NAME}"
+ },
+ "type": "ClusterIP",
+ "sessionAffinity": "None"
+ }
+ }
+ ],
+ "parameters": [
+ {
+ "name": "JENKINS_SERVICE_NAME",
+ "displayName": "Jenkins Service Name",
+ "description": "The name of the OpenShift Service exposed for the Jenkins container.",
+ "value": "jenkins"
+ },
+ {
+ "name": "JNLP_SERVICE_NAME",
+ "displayName": "Jenkins JNLP Service Name",
+ "description": "The name of the service used for master/slave communication.",
+ "value": "jenkins-jnlp"
+ },
+ {
+ "name": "ENABLE_OAUTH",
+ "displayName": "Enable OAuth in Jenkins",
+ "description": "Whether to enable OAuth OpenShift integration. If false, the static account 'admin' will be initialized with the password 'password'.",
+ "value": "true"
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "description": "Maximum amount of memory the container can use.",
+ "value": "512Mi"
+ },
+ {
+ "name": "VOLUME_CAPACITY",
+ "displayName": "Volume Capacity",
+ "description": "Volume space available for data, e.g. 512Mi, 2Gi.",
+ "value": "1Gi",
+ "required": true
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Jenkins ImageStream Namespace",
+ "description": "The OpenShift Namespace where the Jenkins ImageStream resides.",
+ "value": "openshift"
+ },
+ {
+ "name": "JENKINS_IMAGE_STREAM_TAG",
+ "displayName": "Jenkins ImageStreamTag",
+ "description": "Name of the ImageStreamTag to be used for the Jenkins image.",
+ "value": "jenkins:latest"
+ }
+ ],
+ "labels": {
+ "template": "jenkins-persistent-template"
+ }
+}
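The persistent variant above differs from the ephemeral one only in the PersistentVolumeClaim and the volume that mounts it at /var/lib/jenkins, sized by VOLUME_CAPACITY. A minimal sketch of overriding the size at instantiation, assuming a matching persistent volume exists for the claim to bind to:

    # Request a larger Jenkins home than the 1Gi default and a bigger memory
    # limit; the claim stays Pending until a suitable PV is available.
    oc new-app --template=jenkins-persistent -p VOLUME_CAPACITY=10Gi -p MEMORY_LIMIT=1Gi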
diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/nodejs-mongodb.json b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/nodejs-mongodb.json
new file mode 100644
index 000000000..91f9ec7b3
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/nodejs-mongodb.json
@@ -0,0 +1,517 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "nodejs-mongodb-example",
+ "annotations": {
+ "openshift.io/display-name": "Node.js + MongoDB (Ephemeral)",
+ "description": "An example Node.js application with a MongoDB database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/nodejs-ex/blob/master/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing.",
+ "tags": "quickstart,nodejs",
+ "iconClass": "icon-nodejs"
+ }
+ },
+ "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/nodejs-ex/blob/master/README.md.",
+ "labels": {
+ "template": "nodejs-mongodb-example"
+ },
+ "objects": [
+ {
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "stringData": {
+ "databaseUser": "${DATABASE_USER}",
+ "databasePassword": "${DATABASE_PASSWORD}",
+ "databaseAdminPassword" : "${DATABASE_ADMIN_PASSWORD}"
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Exposes and load balances the application pods",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${DATABASE_SERVICE_NAME}\", \"kind\": \"Service\"}]"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "web",
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "kind": "Service",
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Keeps track of changes in the application image"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to build the application"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${NAMESPACE}",
+ "name": "nodejs:4"
+ },
+ "env": [
+ {
+ "name": "NPM_MIRROR",
+ "value": "${NPM_MIRROR}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange"
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ }
+ ],
+ "postCommit": {
+ "script": "npm test"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to deploy the application server"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Rolling"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "nodejs-mongodb-example"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${NAME}",
+ "labels": {
+ "name": "${NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "nodejs-mongodb-example",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 8080
+ }
+ ],
+ "env": [
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "value": "${DATABASE_SERVICE_NAME}"
+ },
+ {
+ "name": "MONGODB_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databaseUser"
+ }
+ }
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databasePassword"
+ }
+ }
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "value": "${DATABASE_NAME}"
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databaseAdminPassword"
+ }
+ }
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 3,
+ "httpGet": {
+ "path": "/pagecount",
+ "port": 8080
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 30,
+ "httpGet": {
+ "path": "/pagecount",
+ "port": 8080
+ }
+ },
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "description": "Exposes the database server"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "mongodb",
+ "port": 27017,
+ "targetPort": 27017
+ }
+ ],
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "description": "Defines how to deploy the database"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "mongodb"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${NAMESPACE}",
+ "name": "mongodb:3.2"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "labels": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "mongodb",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 27017
+ }
+ ],
+ "env": [
+ {
+ "name": "MONGODB_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databaseUser"
+ }
+ }
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databasePassword"
+ }
+ }
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "value": "${DATABASE_NAME}"
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databaseAdminPassword"
+ }
+ }
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 3,
+ "exec": {
+ "command": [
+ "/bin/sh",
+ "-i",
+ "-c",
+ "mongo 127.0.0.1:27017/$MONGODB_DATABASE -u $MONGODB_USER -p $MONGODB_PASSWORD --eval=\"quit()\""
+ ]
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 30,
+ "tcpSocket": {
+ "port": 27017
+ }
+ },
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_MONGODB_LIMIT}"
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "mountPath": "/var/lib/mongodb/data"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "emptyDir": {
+ "medium": ""
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ],
+ "parameters": [
+ {
+ "name": "NAME",
+ "displayName": "Name",
+ "description": "The name assigned to all of the frontend objects defined in this template.",
+ "required": true,
+ "value": "nodejs-mongodb-example"
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Namespace",
+ "description": "The OpenShift Namespace where the ImageStream resides.",
+ "required": true,
+ "value": "openshift"
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "description": "Maximum amount of memory the Node.js container can use.",
+ "required": true,
+ "value": "512Mi"
+ },
+ {
+ "name": "MEMORY_MONGODB_LIMIT",
+ "displayName": "Memory Limit (MongoDB)",
+ "description": "Maximum amount of memory the MongoDB container can use.",
+ "required": true,
+ "value": "512Mi"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_URL",
+ "displayName": "Git Repository URL",
+ "description": "The URL of the repository with your application source code.",
+ "required": true,
+ "value": "https://github.com/openshift/nodejs-ex.git"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_REF",
+ "displayName": "Git Reference",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "CONTEXT_DIR",
+ "displayName": "Context Directory",
+ "description": "Set this to the relative path to your project if it is not in the root of your repository."
+ },
+ {
+ "name": "APPLICATION_DOMAIN",
+ "displayName": "Application Hostname",
+ "description": "The exposed hostname that will route to the Node.js service, if left blank a value will be defaulted.",
+ "value": ""
+ },
+ {
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "displayName": "GitHub Webhook Secret",
+ "description": "A secret string used to configure the GitHub webhook.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "displayName": "Generic Webhook Secret",
+ "description": "A secret string used to configure the Generic webhook.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "displayName": "Database Service Name",
+ "required": true,
+ "value": "mongodb"
+ },
+ {
+ "name": "DATABASE_USER",
+ "displayName": "MongoDB Username",
+ "description": "Username for MongoDB user that will be used for accessing the database.",
+ "generate": "expression",
+ "from": "user[A-Z0-9]{3}"
+ },
+ {
+ "name": "DATABASE_PASSWORD",
+ "displayName": "MongoDB Password",
+ "description": "Password for the MongoDB user.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}"
+ },
+ {
+ "name": "DATABASE_NAME",
+ "displayName": "Database Name",
+ "required": true,
+ "value": "sampledb"
+ },
+ {
+ "name": "DATABASE_ADMIN_PASSWORD",
+ "displayName": "Database Administrator Password",
+ "description": "Password for the database admin user.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}"
+ },
+ {
+ "name": "NPM_MIRROR",
+ "displayName": "Custom NPM Mirror URL",
+ "description": "The custom NPM mirror URL",
+ "value": ""
+ }
+ ]
+}
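
The templates above rely on a simple substitution model: every ${...} placeholder in the "objects" section is resolved from the "parameters" list when the template is processed (for example with oc process). The Python sketch below approximates that substitution step for illustration only; it is not OpenShift's implementation, and the file path in the usage comment is hypothetical.

import json
import re

def process_template(path, overrides=None):
    """Resolve ${PARAM} placeholders in a template's objects from its parameter list.

    Illustrative only -- the real processor also handles 'generate: expression'
    parameters, 'required' validation, and label propagation.
    """
    with open(path) as f:
        template = json.load(f)

    # Start from the declared defaults, then apply any caller-supplied overrides.
    values = {p["name"]: p.get("value", "") for p in template.get("parameters", [])}
    values.update(overrides or {})

    def substitute(node):
        if isinstance(node, dict):
            return {key: substitute(val) for key, val in node.items()}
        if isinstance(node, list):
            return [substitute(val) for val in node]
        if isinstance(node, str):
            # Replace ${PARAM} with its value; leave unknown placeholders untouched.
            return re.sub(r"\$\{(\w+)\}", lambda m: values.get(m.group(1), m.group(0)), node)
        return node

    return [substitute(obj) for obj in template.get("objects", [])]

# Example (hypothetical path):
#   objects = process_template("nodejs-mongodb.json", {"NAME": "myapp"})
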
diff --git a/roles/openshift_examples/files/examples/v1.5/quickstart-templates/rails-postgresql.json b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/rails-postgresql.json
new file mode 100644
index 000000000..6373562c4
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/quickstart-templates/rails-postgresql.json
@@ -0,0 +1,562 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "rails-postgresql-example",
+ "annotations": {
+ "openshift.io/display-name": "Rails + PostgreSQL (Ephemeral)",
+ "description": "An example Rails application with a PostgreSQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing.",
+ "tags": "quickstart,ruby,rails",
+ "iconClass": "icon-ruby"
+ }
+ },
+ "message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.",
+ "labels": {
+ "template": "rails-postgresql-example"
+ },
+ "objects": [
+ {
+ "kind": "Secret",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "stringData" : {
+ "databaseUser" : "${DATABASE_USER}",
+ "databasePassword" : "${DATABASE_PASSWORD}",
+ "applicationUser" : "${APPLICATION_USER}",
+ "applicationPassword" : "${APPLICATION_PASSWORD}",
+ "keyBase" : "${SECRET_KEY_BASE}"
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Exposes and load balances the application pods",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${DATABASE_SERVICE_NAME}\", \"kind\": \"Service\"}]"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "web",
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}"
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "kind": "Service",
+ "name": "${NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Keeps track of changes in the application image"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to build the application"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${NAMESPACE}",
+ "name": "ruby:2.3"
+ },
+ "env": [
+ {
+ "name": "RUBYGEM_MIRROR",
+ "value": "${RUBYGEM_MIRROR}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange"
+ },
+ {
+ "type": "ConfigChange"
+ },
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ }
+ ],
+ "postCommit": {
+ "script": "bundle exec rake test"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${NAME}",
+ "annotations": {
+ "description": "Defines how to deploy the application server"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate",
+ "recreateParams": {
+ "pre": {
+ "failurePolicy": "Abort",
+ "execNewPod": {
+ "command": [
+ "./migrate-database.sh"
+ ],
+ "containerName": "${NAME}"
+ }
+ }
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "rails-postgresql-example"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${NAME}",
+ "labels": {
+ "name": "${NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "rails-postgresql-example",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 8080
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 5,
+ "httpGet": {
+ "path": "/articles",
+ "port": 8080
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 10,
+ "httpGet": {
+ "path": "/articles",
+ "port": 8080
+ }
+ },
+ "env": [
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "value": "${DATABASE_SERVICE_NAME}"
+ },
+ {
+ "name": "POSTGRESQL_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databaseUser"
+ }
+ }
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "databasePassword"
+ }
+ }
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DATABASE_NAME}"
+ },
+ {
+ "name": "SECRET_KEY_BASE",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "keyBase"
+ }
+ }
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ },
+ {
+ "name": "APPLICATION_DOMAIN",
+ "value": "${APPLICATION_DOMAIN}"
+ },
+ {
+ "name": "APPLICATION_USER",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "applicationUser"
+ }
+ }
+ },
+ {
+ "name": "APPLICATION_PASSWORD",
+ "valueFrom": {
+ "secretKeyRef" : {
+ "name" : "${NAME}",
+ "key" : "applicationPassword"
+ }
+ }
+ },
+ {
+ "name": "RAILS_ENV",
+ "value": "${RAILS_ENV}"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "description": "Exposes the database server"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "postgresql",
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "description": "Defines how to deploy the database"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${NAMESPACE}",
+ "name": "postgresql:9.5"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "labels": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "volumes": [
+ {
+ "name": "data",
+ "emptyDir": {}
+ }
+ ],
+ "containers": [
+ {
+ "name": "postgresql",
+ "image": " ",
+ "ports": [
+ {
+ "containerPort": 5432
+ }
+ ],
+ "readinessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 5,
+ "exec": {
+ "command": [ "/bin/sh", "-i", "-c", "psql -h 127.0.0.1 -U ${POSTGRESQL_USER} -q -d ${POSTGRESQL_DATABASE} -c 'SELECT 1'"]
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 1,
+ "initialDelaySeconds": 30,
+ "tcpSocket": {
+ "port": 5432
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "data",
+ "mountPath": "/var/lib/pgsql/data"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DATABASE_USER}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DATABASE_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DATABASE_NAME}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_POSTGRESQL_LIMIT}"
+ }
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ],
+ "parameters": [
+ {
+ "name": "NAME",
+ "displayName": "Name",
+ "description": "The name assigned to all of the frontend objects defined in this template.",
+ "required": true,
+ "value": "rails-postgresql-example"
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Namespace",
+ "required": true,
+ "description": "The OpenShift Namespace where the ImageStream resides.",
+ "value": "openshift"
+ },
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "required": true,
+ "description": "Maximum amount of memory the Rails container can use.",
+ "value": "512Mi"
+ },
+ {
+ "name": "MEMORY_POSTGRESQL_LIMIT",
+ "displayName": "Memory Limit (PostgreSQL)",
+ "required": true,
+ "description": "Maximum amount of memory the PostgreSQL container can use.",
+ "value": "512Mi"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_URL",
+ "displayName": "Git Repository URL",
+ "required": true,
+ "description": "The URL of the repository with your application source code.",
+ "value": "https://github.com/openshift/rails-ex.git"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_REF",
+ "displayName": "Git Reference",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
+ },
+ {
+ "name": "CONTEXT_DIR",
+ "displayName": "Context Directory",
+ "description": "Set this to the relative path to your project if it is not in the root of your repository."
+ },
+ {
+ "name": "APPLICATION_DOMAIN",
+ "displayName": "Application Hostname",
+ "description": "The exposed hostname that will route to the Rails service, if left blank a value will be defaulted.",
+ "value": ""
+ },
+ {
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "displayName": "GitHub Webhook Secret",
+ "description": "A secret string used to configure the GitHub webhook.",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "SECRET_KEY_BASE",
+ "displayName": "Secret Key",
+ "description": "Your secret key for verifying the integrity of signed cookies.",
+ "generate": "expression",
+ "from": "[a-z0-9]{127}"
+ },
+ {
+ "name": "APPLICATION_USER",
+ "displayName": "Application Username",
+ "required": true,
+ "description": "The application user that is used within the sample application to authorize access on pages.",
+ "value": "openshift"
+ },
+ {
+ "name": "APPLICATION_PASSWORD",
+ "displayName": "Application Password",
+ "required": true,
+ "description": "The application password that is used within the sample application to authorize access on pages.",
+ "value": "secret"
+ },
+ {
+ "name": "RAILS_ENV",
+ "displayName": "Rails Environment",
+ "required": true,
+ "description": "Environment under which the sample application will run. Could be set to production, development or test.",
+ "value": "production"
+ },
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "required": true,
+ "displayName": "Database Service Name",
+ "value": "postgresql"
+ },
+ {
+ "name": "DATABASE_USER",
+ "displayName": "Database Username",
+ "generate": "expression",
+ "from": "user[A-Z0-9]{3}"
+ },
+ {
+ "name": "DATABASE_PASSWORD",
+ "displayName": "Database Password",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{8}"
+ },
+ {
+ "name": "DATABASE_NAME",
+ "required": true,
+ "displayName": "Database Name",
+ "value": "root"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "displayName": "Maximum Database Connections",
+ "value": "100"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "displayName": "Shared Buffer Amount",
+ "value": "12MB"
+ },
+ {
+ "name": "RUBYGEM_MIRROR",
+ "displayName": "Custom RubyGems Mirror URL",
+ "description": "The custom RubyGems mirror URL",
+ "value": ""
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-streams/fis-image-streams.json b/roles/openshift_examples/files/examples/v1.5/xpaas-streams/fis-image-streams.json
new file mode 100644
index 000000000..ed0e94bed
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-streams/fis-image-streams.json
@@ -0,0 +1,56 @@
+{
+ "kind": "List",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "fis-image-streams",
+ "annotations": {
+ "description": "ImageStream definitions for JBoss Fuse Integration Services."
+ }
+ },
+ "items": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "fis-java-openshift"
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-fuse-6/fis-java-openshift",
+ "tags": [
+ {
+ "name": "1.0",
+ "annotations": {
+ "description": "JBoss Fuse Integration Services 6.2.1 Java S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,jboss-fuse,java,xpaas",
+ "supports":"jboss-fuse:6.2.1,java:8,xpaas:1.2",
+ "version": "1.0"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "fis-karaf-openshift"
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-fuse-6/fis-karaf-openshift",
+ "tags": [
+ {
+ "name": "1.0",
+ "annotations": {
+ "description": "JBoss Fuse Integration Services 6.2.1 Karaf S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,jboss-fuse,java,karaf,xpaas",
+ "supports":"jboss-fuse:6.2.1,java:8,xpaas:1.2",
+ "version": "1.0"
+ }
+ }
+ ]
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-streams/jboss-image-streams.json b/roles/openshift_examples/files/examples/v1.5/xpaas-streams/jboss-image-streams.json
new file mode 100644
index 000000000..a7cb12867
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-streams/jboss-image-streams.json
@@ -0,0 +1,372 @@
+{
+ "kind": "List",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-image-streams",
+ "annotations": {
+ "description": "ImageStream definitions for JBoss Middleware products."
+ }
+ },
+ "items": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-webserver30-tomcat7-openshift"
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-webserver-3/webserver30-tomcat7-openshift",
+ "tags": [
+ {
+ "name": "1.1",
+ "annotations": {
+ "description": "JBoss Web Server 3.0 Tomcat 7 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,tomcat,tomcat7,java,jboss,xpaas",
+ "supports":"tomcat7:3.0,tomcat:7,java:8,xpaas:1.1",
+ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "sampleContextDir": "tomcat-websocket-chat",
+ "version": "1.1"
+ }
+ },
+ {
+ "name": "1.2",
+ "annotations": {
+ "description": "JBoss Web Server 3.0 Tomcat 7 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,tomcat,tomcat7,java,jboss,xpaas",
+ "supports":"tomcat7:3.0,tomcat:7,java:8,xpaas:1.2",
+ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "sampleContextDir": "tomcat-websocket-chat",
+ "version": "1.2"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-webserver30-tomcat8-openshift"
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-webserver-3/webserver30-tomcat8-openshift",
+ "tags": [
+ {
+ "name": "1.1",
+ "annotations": {
+ "description": "JBoss Web Server 3.0 Tomcat 8 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,tomcat,tomcat8,java,jboss,xpaas",
+ "supports":"tomcat8:3.0,tomcat:8,java:8,xpaas:1.1",
+ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "sampleContextDir": "tomcat-websocket-chat",
+ "version": "1.1"
+ }
+ },
+ {
+ "name": "1.2",
+ "annotations": {
+ "description": "JBoss Web Server 3.0 Tomcat 8 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,tomcat,tomcat8,java,jboss,xpaas",
+ "supports":"tomcat8:3.0,tomcat:8,java:8,xpaas:1.2",
+ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "sampleContextDir": "tomcat-websocket-chat",
+ "version": "1.2"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-eap64-openshift"
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-eap-6/eap64-openshift",
+ "tags": [
+ {
+ "name": "1.1",
+ "annotations": {
+ "description": "JBoss EAP 6.4 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,eap,javaee,java,jboss,xpaas",
+ "supports":"eap:6.4,javaee:6,java:8,xpaas:1.1",
+ "sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
+ "sampleContextDir": "kitchensink",
+ "sampleRef": "6.4.x",
+ "version": "1.1"
+ }
+ },
+ {
+ "name": "1.2",
+ "annotations": {
+ "description": "JBoss EAP 6.4 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,eap,javaee,java,jboss,xpaas",
+ "supports":"eap:6.4,javaee:6,java:8,xpaas:1.2",
+ "sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
+ "sampleContextDir": "kitchensink",
+ "sampleRef": "6.4.x",
+ "version": "1.2"
+ }
+ },
+ {
+ "name": "1.3",
+ "annotations": {
+ "description": "JBoss EAP 6.4 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,eap,javaee,java,jboss,xpaas",
+ "supports":"eap:6.4,javaee:6,java:8,xpaas:1.3",
+ "sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
+ "sampleContextDir": "kitchensink",
+ "sampleRef": "6.4.x",
+ "version": "1.3"
+ }
+ },
+ {
+ "name": "1.4",
+ "annotations": {
+ "description": "JBoss EAP 6.4 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,eap,javaee,java,jboss,xpaas",
+ "supports":"eap:6.4,javaee:6,java:8,xpaas:1.4",
+ "sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
+ "sampleContextDir": "kitchensink",
+ "sampleRef": "6.4.x",
+ "version": "1.4"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-eap70-openshift"
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-eap-7/eap70-openshift",
+ "tags": [
+ {
+ "name": "1.3",
+ "annotations": {
+ "description": "JBoss EAP 7.0 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,eap,javaee,java,jboss,xpaas",
+ "supports":"eap:7.0,javaee:7,java:8,xpaas:1.3",
+ "sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
+ "sampleContextDir": "kitchensink",
+ "sampleRef": "7.0.0.GA",
+ "version": "1.3"
+ }
+ },
+ {
+ "name": "1.4",
+ "annotations": {
+ "description": "JBoss EAP 7.0 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,eap,javaee,java,jboss,xpaas",
+ "supports":"eap:7.0,javaee:7,java:8,xpaas:1.4",
+ "sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
+ "sampleContextDir": "kitchensink",
+ "sampleRef": "7.0.0.GA",
+ "version": "1.4"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-decisionserver62-openshift"
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-decisionserver-6/decisionserver62-openshift",
+ "tags": [
+ {
+ "name": "1.2",
+ "annotations": {
+ "description": "Red Hat JBoss BRMS 6.2 decision server S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,decisionserver,java,xpaas",
+ "supports":"decisionserver:6.2,java:8,xpaas:1.2",
+ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "sampleContextDir": "decisionserver/hellorules",
+ "sampleRef": "1.2",
+ "version": "1.2"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-decisionserver63-openshift"
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-decisionserver-6/decisionserver63-openshift",
+ "tags": [
+ {
+ "name": "1.3",
+ "annotations": {
+ "description": "Red Hat JBoss BRMS 6.3 decision server S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,decisionserver,java,xpaas",
+ "supports":"decisionserver:6.3,java:8,xpaas:1.3",
+ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "sampleContextDir": "decisionserver/hellorules",
+ "sampleRef": "1.3",
+ "version": "1.3"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-processserver63-openshift"
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-processserver-6/processserver63-openshift",
+ "tags": [
+ {
+ "name": "1.3",
+ "annotations": {
+ "description": "Red Hat JBoss BPM Suite 6.3 intelligent process server S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,processserver,java,xpaas",
+ "supports":"processserver:6.3,java:8,xpaas:1.3",
+ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "sampleContextDir": "processserver/library",
+ "sampleRef": "1.3",
+ "version": "1.3"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-datagrid65-openshift"
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-datagrid-6/datagrid65-openshift",
+ "tags": [
+ {
+ "name": "1.2",
+ "annotations": {
+ "description": "JBoss Data Grid 6.5 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "datagrid,java,jboss,xpaas",
+ "supports":"datagrid:6.5,java:8,xpaas:1.2",
+ "version": "1.2"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-datavirt63-openshift"
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-datavirt-6/datavirt63-openshift",
+ "tags": [
+ {
+ "name": "1.0",
+ "annotations": {
+ "description": "Red Hat JBoss Data Virtualization 6.3 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "datavirt,java,jboss,xpaas",
+ "supports":"datavirt:6.3,java:8,xpaas:1.4",
+ "version": "1.0"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-amq-62"
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-amq-6/amq62-openshift",
+ "tags": [
+ {
+ "name": "1.1",
+ "annotations": {
+ "description": "JBoss A-MQ 6.2 broker image.",
+ "iconClass": "icon-jboss",
+ "tags": "messaging,amq,jboss,xpaas",
+ "supports":"amq:6.2,messaging,xpaas:1.1",
+ "version": "1.1"
+ }
+ },
+ {
+ "name": "1.2",
+ "annotations": {
+ "description": "JBoss A-MQ 6.2 broker image.",
+ "iconClass": "icon-jboss",
+ "tags": "messaging,amq,jboss,xpaas",
+ "supports":"amq:6.2,messaging,xpaas:1.2",
+ "version": "1.2"
+ }
+ },
+ {
+ "name": "1.3",
+ "annotations": {
+ "description": "JBoss A-MQ 6.2 broker image.",
+ "iconClass": "icon-jboss",
+ "tags": "messaging,amq,jboss,xpaas",
+ "supports":"amq:6.2,messaging,xpaas:1.3",
+ "version": "1.3"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "redhat-sso70-openshift",
+ "annotations": {
+ "description": "Red Hat SSO 7.0"
+ }
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/redhat-sso-7/sso70-openshift",
+ "tags": [
+ {
+ "name": "1.3",
+ "annotations": {
+ "description": "Red Hat SSO 7.0",
+ "iconClass": "icon-jboss",
+ "tags": "sso,keycloak,redhat",
+ "supports":"sso:7.0,xpaas:1.3",
+ "version": "1.3"
+ }
+ }
+ ]
+ }
+ }
+ ]
+}
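
Both image-stream files above follow the same shape: a v1 List whose items each pair a dockerImageRepository with one or more named tags, so the pullable reference for any tag is simply <repository>:<tag name>. The snippet below is a small illustrative reader for files of this shape (the filename is hypothetical); it is not part of the installer.

import json

def list_image_references(path):
    """Print '<dockerImageRepository>:<tag>' for every tag in an ImageStream list file."""
    with open(path) as f:
        doc = json.load(f)
    for stream in doc.get("items", []):
        repo = stream["spec"]["dockerImageRepository"]
        for tag in stream["spec"].get("tags", []):
            # e.g. "jboss-eap70-openshift: registry.access.redhat.com/jboss-eap-7/eap70-openshift:1.4"
            print("{0}: {1}:{2}".format(stream["metadata"]["name"], repo, tag["name"]))

# Example (hypothetical path):
#   list_image_references("jboss-image-streams.json")
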
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/amq62-basic.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/amq62-basic.json
new file mode 100644
index 000000000..ab35afead
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/amq62-basic.json
@@ -0,0 +1,321 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for JBoss A-MQ brokers. These can be deployed as standalone or in a mesh. This template doesn't feature SSL support.",
+ "iconClass": "icon-jboss",
+ "tags": "messaging,amq,jboss,xpaas",
+ "version": "1.3.1"
+ },
+ "name": "amq62-basic"
+ },
+ "labels": {
+ "template": "amq62-basic",
+ "xpaas": "1.3.1"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "broker",
+ "required": true
+ },
+ {
+ "description": "Protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. If left empty, queues will be still created dynamically.",
+ "name": "MQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. If left empty, topics will be still created dynamically.",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of packages that are allowed to be serialized for use in ObjectMessage, separated by commas. If your app doesn't use ObjectMessages, leave this blank. This is a security enforcement. For the rationale, see http://activemq.apache.org/objectmessage.html",
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5672,
+ "targetPort": 5672
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-amqp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's AMQP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 1883,
+ "targetPort": 1883
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-mqtt",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's MQTT port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61613,
+ "targetPort": 61613
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-stomp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's STOMP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Rolling",
+ "rollingParams": {
+ "maxSurge": 0
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-62:1.3"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-62",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "AMQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "${MQ_SERIALIZABLE_PACKAGES}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
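
MQ_USERNAME and MQ_PASSWORD above are declared with "generate": "expression" and a "from" pattern, so the template processor fills them with a random value matching that pattern when no value is supplied. The sketch below approximates that behaviour for the simple prefix-plus-[class]{n} patterns used in these templates; it is illustrative only and not OpenShift's actual generator.

import random
import re

def expand_expression(pattern):
    """Expand simple 'generate: expression' patterns such as 'user[a-zA-Z0-9]{3}' or '[a-zA-Z0-9]{8}'.

    Only handles literal prefixes plus [class]{count} groups -- enough for the
    patterns used in these templates; the real generator supports more syntax.
    """
    def char_class(spec):
        # Translate a-z / A-Z / 0-9 ranges inside the class into a concrete alphabet.
        alphabet = ""
        for lo, hi in re.findall(r"(.)-(.)", spec):
            alphabet += "".join(chr(c) for c in range(ord(lo), ord(hi) + 1))
        return alphabet or spec

    def repl(match):
        alphabet = char_class(match.group(1))
        count = int(match.group(2))
        return "".join(random.choice(alphabet) for _ in range(count))

    return re.sub(r"\[([^\]]+)\]\{(\d+)\}", repl, pattern)

# Examples:
#   expand_expression("user[a-zA-Z0-9]{3}")  -> e.g. "userX7q"
#   expand_expression("[a-zA-Z0-9]{8}")      -> e.g. "k3Jb9QpZ"
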
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/amq62-persistent-ssl.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/amq62-persistent-ssl.json
new file mode 100644
index 000000000..c12f06dec
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/amq62-persistent-ssl.json
@@ -0,0 +1,549 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for JBoss A-MQ brokers. These are deployed as standalone and use persistent storage for saving messages. This template supports SSL and requires usage of OpenShift secrets.",
+ "iconClass": "icon-jboss",
+ "tags": "messaging,amq,jboss,xpaas",
+ "version": "1.3.1"
+ },
+ "name": "amq62-persistent-ssl"
+ },
+ "labels": {
+ "template": "amq62-persistent-ssl",
+ "xpaas": "1.3.1"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "broker",
+ "required": true
+ },
+ {
+ "description": "Split the data directory for each node in a mesh.",
+ "name": "AMQ_SPLIT",
+ "value": "false",
+ "required": false
+ },
+ {
+ "description": "Protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. If left empty, queues will be still created dynamically.",
+ "name": "MQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. If left empty, topics will be still created dynamically.",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of packages that are allowed to be serialized for use in ObjectMessage, separated by commas. If your app doesn't use ObjectMessages, leave this blank. This is a security enforcement. For the rationale, see http://activemq.apache.org/objectmessage.html",
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "description": "Name of a secret containing SSL related files",
+ "name": "AMQ_SECRET",
+ "value": "amq-app-secret",
+ "required": true
+ },
+ {
+ "description": "SSL trust store filename",
+ "name": "AMQ_TRUSTSTORE",
+ "value": "broker.ts",
+ "required": true
+ },
+ {
+ "description": "SSL trust store password",
+ "name": "AMQ_TRUSTSTORE_PASSWORD",
+ "value": "",
+ "required": true
+ },
+ {
+ "description": "SSL key store filename",
+ "name": "AMQ_KEYSTORE",
+ "value": "broker.ks",
+ "required": true
+ },
+ {
+ "description": "Password for accessing SSL keystore",
+ "name": "AMQ_KEYSTORE_PASSWORD",
+ "value": "",
+ "required": true
+ },
+ {
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5672,
+ "targetPort": 5672
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-amqp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's AMQP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5671,
+ "targetPort": 5671
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-amqp-ssl",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's AMQP SSL port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 1883,
+ "targetPort": 1883
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-mqtt",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's MQTT port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8883,
+ "targetPort": 8883
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-mqtt-ssl",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's MQTT SSL port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61613,
+ "targetPort": 61613
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-stomp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's STOMP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61612,
+ "targetPort": 61612
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-stomp-ssl",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's STOMP SSL port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61617,
+ "targetPort": 61617
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp-ssl",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire (SSL) port."
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Rolling",
+ "rollingParams": {
+ "maxSurge": 0
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-62:1.3"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "amq-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-62",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "broker-secret-volume",
+ "mountPath": "/etc/amq-secret-volume",
+ "readOnly": true
+ },
+ {
+ "mountPath": "/opt/amq/data",
+ "name": "${APPLICATION_NAME}-amq-pvol"
+ }
+ ],
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp-ssl",
+ "containerPort": 5671,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt-ssl",
+ "containerPort": 8883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp-ssl",
+ "containerPort": 61612,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp-ssl",
+ "containerPort": 61617,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "AMQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "${MQ_SERIALIZABLE_PACKAGES}"
+ },
+ {
+ "name": "AMQ_SPLIT",
+ "value": "${AMQ_SPLIT}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_KEYSTORE_TRUSTSTORE_DIR",
+ "value": "/etc/amq-secret-volume"
+ },
+ {
+ "name": "AMQ_TRUSTSTORE",
+ "value": "${AMQ_TRUSTSTORE}"
+ },
+ {
+ "name": "AMQ_TRUSTSTORE_PASSWORD",
+ "value": "${AMQ_TRUSTSTORE_PASSWORD}"
+ },
+ {
+ "name": "AMQ_KEYSTORE",
+ "value": "${AMQ_KEYSTORE}"
+ },
+ {
+ "name": "AMQ_KEYSTORE_PASSWORD",
+ "value": "${AMQ_KEYSTORE_PASSWORD}"
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "broker-secret-volume",
+ "secret": {
+ "secretName": "${AMQ_SECRET}"
+ }
+ },
+ {
+ "name": "${APPLICATION_NAME}-amq-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-amq-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteMany"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/amq62-persistent.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/amq62-persistent.json
new file mode 100644
index 000000000..897ce0395
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/amq62-persistent.json
@@ -0,0 +1,371 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for JBoss A-MQ brokers. These can be deployed as standalone and use persistent storage for saving messages. This template doesn't feature SSL support.",
+ "iconClass": "icon-jboss",
+ "tags": "messaging,amq,jboss,xpaas",
+ "version": "1.3.1"
+ },
+ "name": "amq62-persistent"
+ },
+ "labels": {
+ "template": "amq62-persistent",
+ "xpaas": "1.3.1"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "broker",
+ "required": true
+ },
+ {
+ "description": "Split the data directory for each node in a mesh.",
+ "name": "AMQ_SPLIT",
+ "value": "false",
+ "required": false
+ },
+ {
+ "description": "Protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. If left empty, queues will be still created dynamically.",
+ "name": "MQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. If left empty, topics will be still created dynamically.",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of packages that are allowed to be serialized for use in ObjectMessage, separated by commas. If your app doesn't use ObjectMessages, leave this blank. This is a security enforcement. For the rationale, see http://activemq.apache.org/objectmessage.html",
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5672,
+ "targetPort": 5672
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-amqp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's AMQP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 1883,
+ "targetPort": 1883
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-mqtt",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's MQTT port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61613,
+ "targetPort": 61613
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-stomp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's STOMP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Rolling",
+ "rollingParams": {
+ "maxSurge": 0
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-62:1.3"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-62",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "mountPath": "/opt/amq/data",
+ "name": "${APPLICATION_NAME}-amq-pvol"
+ }
+ ],
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "AMQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "${MQ_SERIALIZABLE_PACKAGES}"
+ },
+ {
+ "name": "AMQ_SPLIT",
+ "value": "${AMQ_SPLIT}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-amq-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-amq-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteMany"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
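The template above resolves its broker mesh through AMQ_MESH_DISCOVERY_TYPE, whose 'kube' default requires the pod's default service account to hold the 'view' role, as its parameter description states. A minimal sketch of instantiating it, assuming the file this hunk belongs to is the persistent A-MQ template (amq62-persistent.json) in this directory, an illustrative project name amq-demo, illustrative queue names, and an oc client whose process command accepts the -p/--param flag:

$ oc new-project amq-demo
$ oc policy add-role-to-user view system:serviceaccount:amq-demo:default
$ oc process -f amq62-persistent.json \
    -p APPLICATION_NAME=broker -p MQ_QUEUES=orders,shipments -p VOLUME_CAPACITY=1Gi \
  | oc create -f -

The generated MQ_USERNAME / MQ_PASSWORD values end up as the AMQ_USER / AMQ_PASSWORD environment variables of the DeploymentConfig, exactly as wired in the env block above.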
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/amq62-ssl.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/amq62-ssl.json
new file mode 100644
index 000000000..97d110286
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/amq62-ssl.json
@@ -0,0 +1,503 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for JBoss A-MQ brokers. These can be deployed as standalone or in a mesh. This template supports SSL and requires usage of OpenShift secrets.",
+ "iconClass": "icon-jboss",
+ "tags": "messaging,amq,jboss,xpaas",
+ "version": "1.3.1"
+ },
+ "name": "amq62-ssl"
+ },
+ "labels": {
+ "template": "amq62-ssl",
+ "xpaas": "1.3.1"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "broker",
+ "required": true
+ },
+ {
+ "description": "Protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. SSL variants of these protocols will be configured automaticaly.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. If left empty, queues will be still created dynamically.",
+ "name": "MQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. If left empty, topics will be still created dynamically.",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of packages that are allowed to be serialized for use in ObjectMessage, separated by commas. If your app doesn't use ObjectMessages, leave this blank. This is a security enforcement. For the rationale, see http://activemq.apache.org/objectmessage.html",
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "Name of a secret containing SSL related files",
+ "name": "AMQ_SECRET",
+ "value": "amq-app-secret",
+ "required": true
+ },
+ {
+ "description": "SSL trust store filename",
+ "name": "AMQ_TRUSTSTORE",
+ "value": "broker.ts",
+ "required": true
+ },
+ {
+ "description": "SSL trust store password",
+ "name": "AMQ_TRUSTSTORE_PASSWORD",
+ "value": "",
+ "required": true
+ },
+ {
+ "description": "SSL key store filename",
+ "name": "AMQ_KEYSTORE",
+ "value": "broker.ks",
+ "required": true
+ },
+ {
+ "description": "Password for accessing SSL keystore",
+ "name": "AMQ_KEYSTORE_PASSWORD",
+ "value": "",
+ "required": true
+ },
+ {
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5672,
+ "targetPort": 5672
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-amqp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's AMQP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5671,
+ "targetPort": 5671
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-amqp-ssl",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's AMQP SSL port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 1883,
+ "targetPort": 1883
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-mqtt",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's MQTT port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8883,
+ "targetPort": 8883
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-mqtt-ssl",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's MQTT SSL port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61613,
+ "targetPort": 61613
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-stomp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's STOMP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61612,
+ "targetPort": 61612
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-stomp-ssl",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's STOMP SSL port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61617,
+ "targetPort": 61617
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp-ssl",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire (SSL) port."
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Rolling",
+ "rollingParams": {
+ "maxSurge": 0
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-62:1.3"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "amq-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-62",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "broker-secret-volume",
+ "mountPath": "/etc/amq-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp-ssl",
+ "containerPort": 5671,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt-ssl",
+ "containerPort": 8883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp-ssl",
+ "containerPort": 61612,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp-ssl",
+ "containerPort": 61617,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "AMQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "${MQ_SERIALIZABLE_PACKAGES}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_KEYSTORE_TRUSTSTORE_DIR",
+ "value": "/etc/amq-secret-volume"
+ },
+ {
+ "name": "AMQ_TRUSTSTORE",
+ "value": "${AMQ_TRUSTSTORE}"
+ },
+ {
+ "name": "AMQ_TRUSTSTORE_PASSWORD",
+ "value": "${AMQ_TRUSTSTORE_PASSWORD}"
+ },
+ {
+ "name": "AMQ_KEYSTORE",
+ "value": "${AMQ_KEYSTORE}"
+ },
+ {
+ "name": "AMQ_KEYSTORE_PASSWORD",
+ "value": "${AMQ_KEYSTORE_PASSWORD}"
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "broker-secret-volume",
+ "secret": {
+ "secretName": "${AMQ_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
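The amq62-ssl template above runs under the amq-service-account service account and mounts the ${AMQ_SECRET} secret (default amq-app-secret) at /etc/amq-secret-volume, expecting it to contain the keystore and truststore named by AMQ_KEYSTORE and AMQ_TRUSTSTORE (defaults broker.ks and broker.ts). A sketch of those prerequisites with self-signed material; the alias, DN and the changeit password are illustrative only, and the same password must then be supplied as AMQ_KEYSTORE_PASSWORD / AMQ_TRUSTSTORE_PASSWORD when processing the template:

$ keytool -genkeypair -alias broker -keyalg RSA -keystore broker.ks \
    -storepass changeit -keypass changeit -dname "CN=broker-amq-tcp-ssl"
$ keytool -exportcert -alias broker -keystore broker.ks -storepass changeit -file broker.crt
$ keytool -importcert -alias broker -keystore broker.ts -storepass changeit -file broker.crt -noprompt
$ oc create serviceaccount amq-service-account
$ oc create secret generic amq-app-secret --from-file=broker.ks --from-file=broker.ts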
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datagrid65-basic.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datagrid65-basic.json
new file mode 100644
index 000000000..56e76016f
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datagrid65-basic.json
@@ -0,0 +1,332 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-jboss",
+ "description": "Application template for JDG 6.5 applications.",
+ "tags": "datagrid,jboss,xpaas",
+ "version": "1.2.0"
+ },
+ "name": "datagrid65-basic"
+ },
+ "labels": {
+ "template": "datagrid65-basic",
+ "xpaas": "1.2.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "datagrid-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "User name for JDG user.",
+ "name": "USERNAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Password for JDG user.",
+ "name": "PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "Comma-separated list of connector types that should be configured (defaults to 'memcached,hotrod,rest')",
+ "name": "INFINISPAN_CONNECTORS",
+ "value": "hotrod,memcached,rest",
+ "required": false
+ },
+ {
+ "description": "Comma-separated list of caches to configure. By default, a distributed-cache, with a mode of SYNC will be configured for each entry.",
+ "name": "CACHE_NAMES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "",
+ "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the cache to expose through this memcached connector (defaults to 'default')",
+ "name": "MEMCACHED_CACHE",
+ "value": "default",
+ "required": false
+ },
+ {
+ "description": "The domain, declared in the security subsystem, that should be used to authenticate access to the REST endpoint",
+ "name": "REST_SECURITY_DOMAIN",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 11211,
+ "targetPort": 11211
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-memcached",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Memcached service for clustered applications."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 11333,
+ "targetPort": 11333
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-hotrod",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Hot Rod service for clustered applications."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTP service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-datagrid65-openshift:1.2"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "jboss-datagrid65-openshift",
+ "imagePullPolicy": "Always",
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/datagrid/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/datagrid/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ },
+ {
+ "name": "memcached",
+ "containerPort": 11211,
+ "protocol": "TCP"
+ },
+ {
+ "name": "hotrod",
+ "containerPort": 11222,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "USERNAME",
+ "value": "${USERNAME}"
+ },
+ {
+ "name": "PASSWORD",
+ "value": "${PASSWORD}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "INFINISPAN_CONNECTORS",
+ "value": "${INFINISPAN_CONNECTORS}"
+ },
+ {
+ "name": "CACHE_NAMES",
+ "value": "${CACHE_NAMES}"
+ },
+ {
+ "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
+ "value": "${ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH}"
+ },
+ {
+ "name": "HOTROD_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-hotrod"
+ },
+ {
+ "name": "MEMCACHED_CACHE",
+ "value": "${MEMCACHED_CACHE}"
+ },
+ {
+ "name": "REST_SECURITY_DOMAIN",
+ "value": "${REST_SECURITY_DOMAIN}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
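Unlike the SSL variants that follow, datagrid65-basic needs no secret or dedicated service account: the only wiring is from the template parameters into the container's env block, and the connectors named in INFINISPAN_CONNECTORS are reached through the ${APPLICATION_NAME}, ${APPLICATION_NAME}-memcached and ${APPLICATION_NAME}-hotrod services defined above. A sketch of instantiating it with illustrative cache names (exact flag spelling may vary with the oc client version):

$ oc process -f datagrid65-basic.json \
    -p APPLICATION_NAME=datagrid-app -p CACHE_NAMES=sessions,catalog \
  | oc create -f -

With the defaults above, the REST connector is then exposed outside the cluster through the ${APPLICATION_NAME} route on port 8080, while memcached (port 11211) and Hot Rod clients stay inside the cluster and use the corresponding services.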
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datagrid65-https.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datagrid65-https.json
new file mode 100644
index 000000000..639ac2e11
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datagrid65-https.json
@@ -0,0 +1,501 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-jboss",
+ "description": "Application template for JDG 6.5 applications.",
+ "tags": "datagrid,jboss,xpaas",
+ "version": "1.2.0"
+ },
+ "name": "datagrid65-https"
+ },
+ "labels": {
+ "template": "datagrid65-https",
+ "xpaas": "1.2.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "datagrid-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "User name for JDG user.",
+ "name": "USERNAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Password for JDG user.",
+ "name": "PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "datagrid-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "Comma-separated list of connector types that should be configured (defaults to 'memcached,hotrod,rest')",
+ "name": "INFINISPAN_CONNECTORS",
+ "value": "hotrod,memcached,rest",
+ "required": false
+ },
+ {
+ "description": "Comma-separated list of caches to configure. By default, a distributed-cache, with a mode of SYNC will be configured for each entry.",
+ "name": "CACHE_NAMES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "",
+ "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the cache to expose through this memcached connector (defaults to 'default')",
+ "name": "MEMCACHED_CACHE",
+ "value": "default",
+ "required": false
+ },
+ {
+ "description": "The domain, declared in the security subsystem, that should be used to authenticate access to the REST endpoint",
+ "name": "REST_SECURITY_DOMAIN",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "datagrid-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTPS port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 11211,
+ "targetPort": 11211
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-memcached",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Memcached service for clustered applications."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 11333,
+ "targetPort": 11333
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-hotrod",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Hot Rod service for clustered applications."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTP service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTPS service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-datagrid65-openshift:1.2"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "datagrid-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "jboss-datagrid65-openshift",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "datagrid-keystore-volume",
+ "mountPath": "/etc/datagrid-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "datagrid-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/datagrid/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/datagrid/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ },
+ {
+ "name": "memcached",
+ "containerPort": 11211,
+ "protocol": "TCP"
+ },
+ {
+ "name": "hotrod",
+ "containerPort": 11222,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "USERNAME",
+ "value": "${USERNAME}"
+ },
+ {
+ "name": "PASSWORD",
+ "value": "${PASSWORD}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/datagrid-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "INFINISPAN_CONNECTORS",
+ "value": "${INFINISPAN_CONNECTORS}"
+ },
+ {
+ "name": "CACHE_NAMES",
+ "value": "${CACHE_NAMES}"
+ },
+ {
+ "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
+ "value": "${ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH}"
+ },
+ {
+ "name": "HOTROD_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-hotrod"
+ },
+ {
+ "name": "MEMCACHED_CACHE",
+ "value": "${MEMCACHED_CACHE}"
+ },
+ {
+ "name": "REST_SECURITY_DOMAIN",
+ "value": "${REST_SECURITY_DOMAIN}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "datagrid-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "datagrid-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
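datagrid65-https adds TLS at two layers: the ${HTTPS_SECRET} secret (default datagrid-app-secret) supplies the HTTPS keystore named by HTTPS_KEYSTORE, and ${JGROUPS_ENCRYPT_SECRET} (same default) supplies the JCEKS keystore used to encrypt JGroups traffic; the pod also runs under datagrid-service-account. A sketch of preparing both keystores in a single shared secret; the alias names, the changeit password and the AES key algorithm are illustrative assumptions and must match the HTTPS_* and JGROUPS_ENCRYPT_* parameters passed at processing time:

$ keytool -genkeypair -alias jdg -keyalg RSA -keystore keystore.jks \
    -storepass changeit -keypass changeit -dname "CN=secure-datagrid-app"
$ keytool -genseckey -alias jgroups -keyalg AES -keysize 128 -storetype JCEKS \
    -keystore jgroups.jceks -storepass changeit -keypass changeit
$ oc create serviceaccount datagrid-service-account
$ oc create secret generic datagrid-app-secret --from-file=keystore.jks --from-file=jgroups.jceks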
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datagrid65-mysql-persistent.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datagrid65-mysql-persistent.json
new file mode 100644
index 000000000..22ca3f0a0
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datagrid65-mysql-persistent.json
@@ -0,0 +1,779 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-jboss",
+ "description": "Application template for JDG 6.5 and MySQL applications with persistent storage.",
+ "tags": "datagrid,jboss,xpaas",
+ "version": "1.2.0"
+ },
+ "name": "datagrid65-mysql-persistent"
+ },
+ "labels": {
+ "template": "datagrid65-mysql-persistent",
+ "xpaas": "1.2.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "datagrid-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "User name for JDG user.",
+ "name": "USERNAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Password for JDG user.",
+ "name": "PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "datagrid-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mysql",
+ "name": "DB_JNDI",
+ "value": "java:/jboss/datasources/mysql",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "Comma-separated list of connector types that should be configured (defaults to 'memcached,hotrod,rest')",
+ "name": "INFINISPAN_CONNECTORS",
+ "value": "hotrod,memcached,rest",
+ "required": false
+ },
+ {
+ "description": "Comma-separated list of caches to configure. By default, a distributed-cache, with a mode of SYNC will be configured for each entry.",
+ "name": "CACHE_NAMES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "",
+ "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the cache to expose through this memcached connector (defaults to 'default')",
+ "name": "MEMCACHED_CACHE",
+ "value": "default",
+ "required": false
+ },
+ {
+ "description": "The domain, declared in the security subsystem, that should be used to authenticate access to the REST endpoint",
+ "name": "REST_SECURITY_DOMAIN",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "datagrid-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTPS port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 11211,
+ "targetPort": 11211
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-memcached",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Memcached service for clustered applications."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 11333,
+ "targetPort": 11333
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-hotrod",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Hot Rod service for clustered applications."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTP service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTPS service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-datagrid65-openshift:1.2"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "datagrid-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "jboss-datagrid65-openshift",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "datagrid-keystore-volume",
+ "mountPath": "/etc/datagrid-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "datagrid-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/datagrid/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/datagrid/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ },
+ {
+ "name": "memcached",
+ "containerPort": 11211,
+ "protocol": "TCP"
+ },
+ {
+ "name": "hotrod",
+ "containerPort": 11222,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "USERNAME",
+ "value": "${USERNAME}"
+ },
+ {
+ "name": "PASSWORD",
+ "value": "${PASSWORD}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/datagrid-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "DEFAULT_JDBC_STORE_TYPE",
+ "value": "string"
+ },
+ {
+ "name": "DEFAULT_JDBC_STORE_DATASOURCE",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "MEMCACHED_JDBC_STORE_TYPE",
+ "value": "string"
+ },
+ {
+ "name": "MEMCACHED_JDBC_STORE_DATASOURCE",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "INFINISPAN_CONNECTORS",
+ "value": "${INFINISPAN_CONNECTORS}"
+ },
+ {
+ "name": "CACHE_NAMES",
+ "value": "${CACHE_NAMES}"
+ },
+ {
+ "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
+ "value": "${ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH}"
+ },
+ {
+ "name": "HOTROD_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-hotrod"
+ },
+ {
+ "name": "MEMCACHED_CACHE",
+ "value": "${MEMCACHED_CACHE}"
+ },
+ {
+ "name": "REST_SECURITY_DOMAIN",
+ "value": "${REST_SECURITY_DOMAIN}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "datagrid-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "datagrid-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/mysql/data",
+ "name": "${APPLICATION_NAME}-mysql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-mysql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-mysql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
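In the MySQL-backed variant above, DB_SERVICE_PREFIX_MAPPING="${APPLICATION_NAME}-mysql=DB" ties the DB_* settings to the ${APPLICATION_NAME}-mysql service, the generated DB_USERNAME / DB_PASSWORD are injected into both DeploymentConfigs, and the database keeps its state on a ReadWriteOnce PVC sized by VOLUME_CAPACITY (default 512Mi). The service-account and secret prerequisites are the same as for datagrid65-https above. A sketch with illustrative database name, credentials and volume size:

$ oc process -f datagrid65-mysql-persistent.json \
    -p APPLICATION_NAME=datagrid-app -p DB_DATABASE=jdgstore \
    -p DB_USERNAME=jdg -p DB_PASSWORD=S3cretPw1 -p VOLUME_CAPACITY=2Gi \
  | oc create -f -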
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datagrid65-mysql.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datagrid65-mysql.json
new file mode 100644
index 000000000..e1a585d24
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datagrid65-mysql.json
@@ -0,0 +1,739 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-jboss",
+ "description": "Application template for JDG 6.5 and MySQL applications.",
+ "tags": "datagrid,jboss,xpaas",
+ "version": "1.2.0"
+ },
+ "name": "datagrid65-mysql"
+ },
+ "labels": {
+ "template": "datagrid65-mysql",
+ "xpaas": "1.2.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "datagrid-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "User name for JDG user.",
+ "name": "USERNAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Password for JDG user.",
+ "name": "PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "datagrid-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mysql",
+ "name": "DB_JNDI",
+ "value": "java:/jboss/datasources/mysql",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "Comma-separated list of connector types that should be configured (defaults to 'memcached,hotrod,rest')",
+ "name": "INFINISPAN_CONNECTORS",
+ "value": "hotrod,memcached,rest",
+ "required": false
+ },
+ {
+ "description": "Comma-separated list of caches to configure. By default, a distributed-cache, with a mode of SYNC will be configured for each entry.",
+ "name": "CACHE_NAMES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "",
+ "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the cache to expose through this memcached connector (defaults to 'default')",
+ "name": "MEMCACHED_CACHE",
+ "value": "default",
+ "required": false
+ },
+ {
+ "description": "The domain, declared in the security subsystem, that should be used to authenticate access to the REST endpoint",
+ "name": "REST_SECURITY_DOMAIN",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "datagrid-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTPS port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 11211,
+ "targetPort": 11211
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-memcached",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Memcached service for clustered applications."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 11333,
+ "targetPort": 11333
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-hotrod",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Hot Rod service for clustered applications."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTP service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTPS service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-datagrid65-openshift:1.2"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "datagrid-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "jboss-datagrid65-openshift",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "datagrid-keystore-volume",
+ "mountPath": "/etc/datagrid-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "datagrid-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/datagrid/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/datagrid/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ },
+ {
+ "name": "memcached",
+ "containerPort": 11211,
+ "protocol": "TCP"
+ },
+ {
+ "name": "hotrod",
+ "containerPort": 11222,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "USERNAME",
+ "value": "${USERNAME}"
+ },
+ {
+ "name": "PASSWORD",
+ "value": "${PASSWORD}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/datagrid-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "DEFAULT_JDBC_STORE_TYPE",
+ "value": "string"
+ },
+ {
+ "name": "DEFAULT_JDBC_STORE_DATASOURCE",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "MEMCACHED_JDBC_STORE_TYPE",
+ "value": "string"
+ },
+ {
+ "name": "MEMCACHED_JDBC_STORE_DATASOURCE",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "INFINISPAN_CONNECTORS",
+ "value": "${INFINISPAN_CONNECTORS}"
+ },
+ {
+ "name": "CACHE_NAMES",
+ "value": "${CACHE_NAMES}"
+ },
+ {
+ "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
+ "value": "${ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH}"
+ },
+ {
+ "name": "HOTROD_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-hotrod"
+ },
+ {
+ "name": "MEMCACHED_CACHE",
+ "value": "${MEMCACHED_CACHE}"
+ },
+ {
+ "name": "REST_SECURITY_DOMAIN",
+ "value": "${REST_SECURITY_DOMAIN}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "datagrid-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "datagrid-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
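The template above pairs a JDG 6.5 DeploymentConfig with a MySQL DeploymentConfig, wiring them together through DB_SERVICE_PREFIX_MAPPING and the ${APPLICATION_NAME}-mysql service. As a rough illustration of the Template data structure (not part of this change), the short Python sketch below loads such a template and prints which parameters are required and which are auto-generated; the file path is an assumption for the example and may differ from the path used in this commit.

    #!/usr/bin/env python
    # Illustrative only: inspect a template's "parameters" section.
    # The path below is an assumption for the example, not taken from this diff.
    import json

    TEMPLATE = "roles/openshift_examples/files/examples/v1.5/xpaas-templates/datagrid65-mysql.json"

    with open(TEMPLATE) as handle:
        template = json.load(handle)

    for param in template.get("parameters", []):
        required = "required" if param.get("required") else "optional"
        generated = " (generated)" if param.get("generate") == "expression" else ""
        print("%-40s %s%s" % (param["name"], required, generated))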
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datagrid65-postgresql-persistent.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datagrid65-postgresql-persistent.json
new file mode 100644
index 000000000..12720eb19
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datagrid65-postgresql-persistent.json
@@ -0,0 +1,756 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-jboss",
+ "description": "Application template for JDG 6.5 and PostgreSQL applications with persistent storage.",
+ "tags": "datagrid,jboss,xpaas",
+ "version": "1.3.2"
+ },
+ "name": "datagrid65-postgresql-persistent"
+ },
+ "labels": {
+ "template": "datagrid65-postgresql-persistent",
+ "xpaas": "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "datagrid-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "User name for JDG user.",
+ "name": "USERNAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Password for JDG user.",
+ "name": "PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "datagrid-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/postgresql",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "Comma-separated list of connector types that should be configured (defaults to 'memcached,hotrod,rest')",
+ "name": "INFINISPAN_CONNECTORS",
+ "value": "hotrod,memcached,rest",
+ "required": false
+ },
+ {
+ "description": "Comma-separated list of caches to configure. By default, a distributed-cache, with a mode of SYNC will be configured for each entry.",
+ "name": "CACHE_NAMES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "",
+ "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the cache to expose through this memcached connector (defaults to 'default')",
+ "name": "MEMCACHED_CACHE",
+ "value": "default",
+ "required": false
+ },
+ {
+ "description": "The domain, declared in the security subsystem, that should be used to authenticate access to the REST endpoint",
+ "name": "REST_SECURITY_DOMAIN",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "datagrid-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTPS port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 11211,
+ "targetPort": 11211
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-memcached",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Memcached service for clustered applications."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 11333,
+ "targetPort": 11333
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-hotrod",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Hot Rod service for clustered applications."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTP service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTPS service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-datagrid65-openshift:1.2"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "datagrid-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "jboss-datagrid65-openshift",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "datagrid-keystore-volume",
+ "mountPath": "/etc/datagrid-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "datagrid-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/datagrid/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/datagrid/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ },
+ {
+ "name": "memcached",
+ "containerPort": 11211,
+ "protocol": "TCP"
+ },
+ {
+ "name": "hotrod",
+ "containerPort": 11222,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "USERNAME",
+ "value": "${USERNAME}"
+ },
+ {
+ "name": "PASSWORD",
+ "value": "${PASSWORD}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/datagrid-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "DEFAULT_JDBC_STORE_TYPE",
+ "value": "string"
+ },
+ {
+ "name": "DEFAULT_JDBC_STORE_DATASOURCE",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "MEMCACHED_JDBC_STORE_TYPE",
+ "value": "string"
+ },
+ {
+ "name": "MEMCACHED_JDBC_STORE_DATASOURCE",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "INFINISPAN_CONNECTORS",
+ "value": "${INFINISPAN_CONNECTORS}"
+ },
+ {
+ "name": "CACHE_NAMES",
+ "value": "${CACHE_NAMES}"
+ },
+ {
+ "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
+ "value": "${ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH}"
+ },
+ {
+ "name": "HOTROD_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-hotrod"
+ },
+ {
+ "name": "MEMCACHED_CACHE",
+ "value": "${MEMCACHED_CACHE}"
+ },
+ {
+ "name": "REST_SECURITY_DOMAIN",
+ "value": "${REST_SECURITY_DOMAIN}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "datagrid-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "datagrid-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/pgsql/data",
+ "name": "${APPLICATION_NAME}-postgresql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_PREPARED_TRANSACTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-postgresql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
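The persistent variant above differs from the non-persistent template mainly in the ${APPLICATION_NAME}-postgresql-claim PersistentVolumeClaim, whose size is driven by VOLUME_CAPACITY. To make the ${...} parameter mechanism concrete, the sketch below is a minimal, simplified emulation of the substitution step that `oc process` performs (it ignores generated expressions and only handles plain string values); the override values shown are hypothetical.

    #!/usr/bin/env python
    # Illustrative only: naive ${PARAM} substitution over a template's objects.
    # Real `oc process` also evaluates "generate": "expression" parameters,
    # which this sketch does not attempt.
    import json
    import re

    def process(template, overrides):
        values = {}
        for param in template.get("parameters", []):
            values[param["name"]] = overrides.get(param["name"], param.get("value", ""))
        rendered = re.sub(r"\$\{(\w+)\}",
                          lambda match: values.get(match.group(1), match.group(0)),
                          json.dumps(template["objects"]))
        return json.loads(rendered)

    # Hypothetical usage:
    # with open("datagrid65-postgresql-persistent.json") as handle:
    #     objects = process(json.load(handle),
    #                       {"APPLICATION_NAME": "mygrid", "VOLUME_CAPACITY": "1Gi"})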
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datagrid65-postgresql.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datagrid65-postgresql.json
new file mode 100644
index 000000000..da8015fb0
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datagrid65-postgresql.json
@@ -0,0 +1,716 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-jboss",
+ "description": "Application template for JDG 6.5 and PostgreSQL applications built using.",
+ "tags": "datagrid,jboss,xpaas",
+ "version": "1.3.2"
+ },
+ "name": "datagrid65-postgresql"
+ },
+ "labels": {
+ "template": "datagrid65-postgresql",
+ "xpaas": "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "datagrid-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "User name for JDG user.",
+ "name": "USERNAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Password for JDG user.",
+ "name": "PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "datagrid-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/postgresql",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "Comma-separated list of connector types that should be configured (defaults to 'memcached,hotrod,rest')",
+ "name": "INFINISPAN_CONNECTORS",
+ "value": "hotrod,memcached,rest",
+ "required": false
+ },
+ {
+ "description": "Comma-separated list of caches to configure. By default, a distributed-cache, with a mode of SYNC will be configurd for each entry.",
+ "name": "CACHE_NAMES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "",
+ "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the cache to expose through this memcached connector (defaults to 'default')",
+ "name": "MEMCACHED_CACHE",
+ "value": "default",
+ "required": false
+ },
+ {
+ "description": "The domain, declared in the security subsystem, that should be used to authenticate access to the REST endpoint",
+ "name": "REST_SECURITY_DOMAIN",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "datagrid-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTPS port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 11211,
+ "targetPort": 11211
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-memcached",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Memcached service for clustered applications."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 11333,
+ "targetPort": 11333
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-hotrod",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Hot Rod service for clustered applications."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTP service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTPS service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-datagrid65-openshift:1.2"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "datagrid-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "jboss-datagrid65-openshift",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "datagrid-keystore-volume",
+ "mountPath": "/etc/datagrid-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "datagrid-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/datagrid/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/datagrid/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ },
+ {
+ "name": "memcached",
+ "containerPort": 11211,
+ "protocol": "TCP"
+ },
+ {
+ "name": "hotrod",
+ "containerPort": 11222,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "USERNAME",
+ "value": "${USERNAME}"
+ },
+ {
+ "name": "PASSWORD",
+ "value": "${PASSWORD}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/datagrid-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "DEFAULT_JDBC_STORE_TYPE",
+ "value": "string"
+ },
+ {
+ "name": "DEFAULT_JDBC_STORE_DATASOURCE",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "MEMCACHED_JDBC_STORE_TYPE",
+ "value": "string"
+ },
+ {
+ "name": "MEMCACHED_JDBC_STORE_DATASOURCE",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "INFINISPAN_CONNECTORS",
+ "value": "${INFINISPAN_CONNECTORS}"
+ },
+ {
+ "name": "CACHE_NAMES",
+ "value": "${CACHE_NAMES}"
+ },
+ {
+ "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
+ "value": "${ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH}"
+ },
+ {
+ "name": "HOTROD_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-hotrod"
+ },
+ {
+ "name": "MEMCACHED_CACHE",
+ "value": "${MEMCACHED_CACHE}"
+ },
+ {
+ "name": "REST_SECURITY_DOMAIN",
+ "value": "${REST_SECURITY_DOMAIN}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "datagrid-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "datagrid-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_PREPARED_TRANSACTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datavirt63-basic-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datavirt63-basic-s2i.json
new file mode 100644
index 000000000..7d64dac98
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datavirt63-basic-s2i.json
@@ -0,0 +1,415 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-jboss",
+ "description": "Application template for JBoss Data Virtualization 6.3 services built using S2I.",
+ "tags": "jdv,datavirt,jboss,xpaas",
+ "version": "1.4.0"
+ },
+ "name": "datavirt63-basic-s2i"
+ },
+ "labels": {
+ "template": "datavirt63-basic-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new data service has been created in your project. The username/password for accessing the service is ${TEIID_USERNAME}/${TEIID_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the secret named ${CONFIGURATION_NAME} containing the datasource configuration details required by the deployed VDB(s).",
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "displayName": "Application Name",
+ "name": "APPLICATION_NAME",
+ "value": "datavirt-app",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing configuration properties for the data sources.",
+ "displayName": "Configuration Secret Name",
+ "name": "CONFIGURATION_NAME",
+ "value": "datavirt-app-config",
+ "required": true
+ },
+ {
+ "description": "Specify a custom hostname for the http route. Leave blank to use default hostname, e.g.: <service-name>-<project>.<default-domain-suffix>",
+ "displayName": "Custom http Route Hostname",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The URL of the repository with your application source code.",
+ "displayName": "Git Repository URL",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch.",
+ "displayName": "Git Reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "master",
+ "required": false
+ },
+ {
+ "description": "Set this to the relative path to your project if it is not in the root of your repository.",
+ "displayName": "Context Directory",
+ "name": "CONTEXT_DIR",
+ "value": "datavirt/dynamicvdb-datafederation/app",
+ "required": false
+ },
+ {
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow usage of the secret specified by CONFIGURATION_NAME.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "datavirt-service-account",
+ "required": true
+ },
+ {
+ "description": "Username associated with Teiid data service.",
+ "displayName": "Teiid Username",
+ "name": "TEIID_USERNAME",
+ "from": "[\\a]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Password for Teiid user.",
+ "displayName": "Teiid User Password",
+ "name": "TEIID_PASSWORD",
+ "from": "[\\a\\A]{8}[\\d]{1}[\\A]{1}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Username associated with ModeShape.",
+ "displayName": "ModeShape Username",
+ "name": "MODESHAPE_USERNAME",
+ "from": "[\\a]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Password for ModeShape user.",
+ "displayName": "ModeShape User Password",
+ "name": "MODESHAPE_PASSWORD",
+ "from": "[\\a\\A]{8}[\\d]{1}[\\A]{1}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "A secret string used to configure the GitHub webhook.",
+ "displayName": "Github Webhook Secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "A secret string used to configure the Generic webhook.",
+ "displayName": "Generic Webhook Secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "displayName": "ImageStream Namespace",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "Password used by JGroups to authenticate nodes in the cluster.",
+ "displayName": "JGroups Cluster Password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "displayName": "Deploy Exploded Archives",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "name": "http",
+ "port": 8080,
+ "targetPort": "http"
+ },
+ {
+ "name": "jdbc",
+ "port": 31000,
+ "targetPort": "jdbc"
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The data virtualization services."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http (REST) service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "port": {
+ "targetPort": "http"
+ },
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-datavirt63-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "configuration",
+ "mountPath": "/etc/datavirt-environment",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "jdbc",
+ "containerPort": 31000,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ },
+ {
+ "name": "TEIID_USERNAME",
+ "value": "${TEIID_USERNAME}"
+ },
+ {
+ "name": "TEIID_PASSWORD",
+ "value": "${TEIID_PASSWORD}"
+ },
+ {
+ "name": "MODESHAPE_USERNAME",
+ "value": "${MODESHAPE_USERNAME}"
+ },
+ {
+ "name": "MODESHAPE_PASSWORD",
+ "value": "${MODESHAPE_PASSWORD}"
+ },
+ {
+ "name": "ENV_FILES",
+ "value": "/etc/datavirt-environment/*"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "configuration",
+ "secret": {
+ "secretName": "${CONFIGURATION_NAME}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datavirt63-extensions-support-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datavirt63-extensions-support-s2i.json
new file mode 100644
index 000000000..1e7c03b99
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datavirt63-extensions-support-s2i.json
@@ -0,0 +1,763 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-jboss",
+ "description": "Application template for JBoss Data Virtualization 6.3 services built using S2I. Includes support for installing extensions (e.g. third-party DB drivers) and the ability to configure certificates for serving secure content.",
+ "tags": "jdv,datavirt,jboss,xpaas",
+ "version": "1.4.0"
+ },
+ "name": "datavirt63-extensions-support-s2i"
+ },
+ "labels": {
+ "template": "datavirt63-extensions-support-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new data service has been created in your project. The username/password for accessing the service is ${TEIID_USERNAME}/${TEIID_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${CONFIGURATION_NAME}\" containing the datasource configuration details required by the deployed VDB(s); \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "displayName": "Application Name",
+ "name": "APPLICATION_NAME",
+ "value": "datavirt-app",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing configuration properties for the data sources.",
+ "displayName": "Configuration Secret Name",
+ "name": "CONFIGURATION_NAME",
+ "value": "datavirt-app-config",
+ "required": true
+ },
+ {
+ "description": "Specify a custom hostname for the http route. Leave blank to use default hostname, e.g.: <service-name>-<project>.<default-domain-suffix>",
+ "displayName": "Custom http Route Hostname",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Specify a custom hostname for the https route. Leave blank to use default hostname, e.g.: secure-<service-name>-<project>.<default-domain-suffix>",
+ "displayName": "Custom https Route Hostname",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Specify a custom hostname for the JDBC route. Leave blank to use default hostname, e.g.: secure-<service-name>-<project>.<default-domain-suffix>",
+ "displayName": "Custom JDBC Route Hostname",
+ "name": "HOSTNAME_JDBC",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The URL of the repository with your application source code.",
+ "displayName": "Git Repository URL",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch.",
+ "displayName": "Git Reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "master",
+ "required": false
+ },
+ {
+ "description": "Set this to the relative path to your project if it is not in the root of your repository.",
+ "displayName": "Context Directory",
+ "name": "CONTEXT_DIR",
+ "value": "datavirt/dynamicvdb-datafederation/app",
+ "required": false
+ },
+ {
+ "description": "The URL of the repository with source code for the extensions image. The image should have all modules, etc., placed in the \"/extensions/\" directory in the image. If the contents are in a different directory, the sourcePath for the ImageSource in the BuildConfig must be modified.",
+ "displayName": "Extensions Git Repository URL",
+ "name": "EXTENSIONS_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Set this to a branch name, tag or other ref of your extensions repository if you are not using the default branch.",
+ "displayName": "Extensions Git Reference",
+ "name": "EXTENSIONS_REPOSITORY_REF",
+ "value": "master",
+ "required": false
+ },
+ {
+ "description": "Set this to the relative path to your project if it is not in the root of your extensions repository.",
+ "displayName": "Extensions Context Directory",
+ "name": "EXTENSIONS_DIR",
+ "value": "datavirt/derby-driver-image",
+ "required": false
+ },
+ {
+ "description": "Set this to the relative path to the Dockerfile in your extensions directory.",
+ "displayName": "Extensions Dockerfile",
+ "name": "EXTENSIONS_DOCKERFILE",
+ "value": "Dockerfile",
+ "required": false
+ },
+ {
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow usage of the secret(s) specified by CONFIGURATION_NAME, HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "datavirt-service-account",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore to be used for serving secure content.",
+ "displayName": "Server Keystore Secret Name",
+ "name": "HTTPS_SECRET",
+ "value": "datavirt-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the keystore file within the secret.",
+ "displayName": "Server Keystore Filename",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The type of the keystore file (JKS or JCEKS).",
+ "displayName": "Server Keystore Type",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate.",
+ "displayName": "Server Certificate Name",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "displayName": "Server Keystore Password",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "description": "Username associated with Teiid data service.",
+ "displayName": "Teiid Username",
+ "name": "TEIID_USERNAME",
+ "from": "[\\a]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Password for Teiid user.",
+ "displayName": "Teiid User Password",
+ "name": "TEIID_PASSWORD",
+ "from": "[\\a\\A]{8}[\\d]{1}[\\A]{1}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Username associated with ModeShape.",
+ "displayName": "ModeShape Username",
+ "name": "MODESHAPE_USERNAME",
+ "from": "[\\a]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Password for ModeShape user.",
+ "displayName": "ModeShape User Password",
+ "name": "MODESHAPE_PASSWORD",
+ "from": "[\\a\\A]{8}[\\d]{1}[\\A]{1}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "A secret string used to configure the GitHub webhook.",
+ "displayName": "Github Webhook Secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "A secret string used to configure the Generic webhook.",
+ "displayName": "Generic Webhook Secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "displayName": "ImageStream Namespace",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore to be used for securing JGroups communications.",
+ "displayName": "JGroups Secret Name",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "datavirt-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the JGroups secret.",
+ "displayName": "JGroups Keystore Filename",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the JGroups server certificate",
+ "displayName": "JGroups Certificate Name",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "secret-key",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "displayName": "JGroups Keystore Password",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "password",
+ "required": false
+ },
+ {
+ "description": "Password used by JGroups to authenticate nodes in the cluster.",
+ "displayName": "JGroups Cluster Password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "displayName": "Deploy Exploded Archives",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "name": "http",
+ "port": 8080,
+ "targetPort": "http"
+ },
+ {
+ "name": "https",
+ "port": 8443,
+ "targetPort": "https"
+ },
+ {
+ "name": "jdbc",
+ "port": 31000,
+ "targetPort": "jdbc"
+ },
+ {
+ "name": "jdbcs",
+ "port": 31443,
+ "targetPort": "jdbcs"
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The data virtualization services."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http (REST) service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "port": {
+ "targetPort": "http"
+ },
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https (REST) service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "port": {
+ "targetPort": "https"
+ },
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-jdbc",
+ "metadata": {
+ "name": "jdbc-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's JDBC service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_JDBC}",
+ "port": {
+ "targetPort": "jdbcs"
+ },
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-ext",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-ext",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${EXTENSIONS_REPOSITORY_URL}",
+ "ref": "${EXTENSIONS_REPOSITORY_REF}"
+ },
+ "contextDir": "${EXTENSIONS_DIR}"
+ },
+ "strategy": {
+ "type": "Docker",
+ "dockerStrategy": {
+ "dockerfilePath": "${EXTENSIONS_DOCKERFILE}"
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}-ext:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}",
+ "images": [
+ {
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}-ext:latest"
+ },
+ "paths": [
+ {
+ "destinationDir": "./${CONTEXT_DIR}/extensions/extras",
+ "sourcePath": "/extensions/."
+ }
+ ]
+ }
+ ]
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-datavirt63-openshift:1.0"
+ },
+ "env": [
+ {
+ "name": "CUSTOM_INSTALL_DIRECTORIES",
+ "value": "extensions/*"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}-ext:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "configuration",
+ "mountPath": "/etc/datavirt-environment",
+ "readOnly": true
+ },
+ {
+ "name": "datavirt-keystore-volume",
+ "mountPath": "/etc/datavirt-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "datavirt-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "jdbc",
+ "containerPort": 31000,
+ "protocol": "TCP"
+ },
+ {
+ "name": "jdbcs",
+ "containerPort": 31443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/datavirt-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ },
+ {
+ "name": "TEIID_USERNAME",
+ "value": "${TEIID_USERNAME}"
+ },
+ {
+ "name": "TEIID_PASSWORD",
+ "value": "${TEIID_PASSWORD}"
+ },
+ {
+ "name": "MODESHAPE_USERNAME",
+ "value": "${MODESHAPE_USERNAME}"
+ },
+ {
+ "name": "MODESHAPE_PASSWORD",
+ "value": "${MODESHAPE_PASSWORD}"
+ },
+ {
+ "name": "ENV_FILES",
+ "value": "/etc/datavirt-environment/*"
+ },
+ {
+ "name": "DATAVIRT_TRANSPORT_KEYSTORE",
+ "value": "/etc/datavirt-secret-volume/${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "DATAVIRT_TRANSPORT_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "DATAVIRT_TRANSPORT_KEY_ALIAS",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "DATAVIRT_TRANSPORT_KEYSTORE_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "QS_DB_TYPE",
+ "value": "derby",
+                                        "description": "Used solely by the quickstart and set here to ensure the template can be instantiated with its default parameter values, i.e. so it works out of the box."
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "configuration",
+ "secret": {
+ "secretName": "${CONFIGURATION_NAME}"
+ }
+ },
+ {
+ "name": "datavirt-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "datavirt-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
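The two BuildConfigs in the template above are chained: the main build copies /extensions/. out of the ${APPLICATION_NAME}-ext image into extensions/extras and installs it via CUSTOM_INSTALL_DIRECTORIES, and an ImageChange trigger on ${APPLICATION_NAME}-ext:latest re-runs the main build whenever the extensions image changes. A rough sketch of driving that by hand, assuming APPLICATION_NAME is left at a default of datavirt-app (the name is an assumption, not taken from this file):

    # build the extensions image first; its output feeds the main build
    oc start-build datavirt-app-ext --follow
    # the ImageChange trigger on datavirt-app-ext:latest normally starts the main
    # build on its own; start it manually only if you want to force a rebuild
    oc start-build datavirt-app --follow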
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datavirt63-secure-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datavirt63-secure-s2i.json
new file mode 100644
index 000000000..07f926ff3
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/datavirt63-secure-s2i.json
@@ -0,0 +1,642 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-jboss",
+ "description": "Application template for JBoss Data Virtualization 6.3 services built using S2I. Includes ability to configure certificates for serving secure content.",
+ "tags": "jdv,datavirt,jboss,xpaas",
+ "version": "1.4.0"
+ },
+ "name": "datavirt63-secure-s2i"
+ },
+ "labels": {
+ "template": "datavirt63-secure-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new data service has been created in your project. The username/password for accessing the service is ${TEIID_USERNAME}/${TEIID_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${CONFIGURATION_NAME}\" containing the datasource configuration details required by the deployed VDB(s); \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "displayName": "Application Name",
+ "name": "APPLICATION_NAME",
+ "value": "datavirt-app",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing configuration properties for the data sources.",
+ "displayName": "Configuration Secret Name",
+ "name": "CONFIGURATION_NAME",
+ "value": "datavirt-app-config",
+ "required": true
+ },
+ {
+ "description": "Specify a custom hostname for the http route. Leave blank to use default hostname, e.g.: <service-name>-<project>.<default-domain-suffix>",
+ "displayName": "Custom http Route Hostname",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Specify a custom hostname for the https route. Leave blank to use default hostname, e.g.: secure-<service-name>-<project>.<default-domain-suffix>",
+ "displayName": "Custom https Route Hostname",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Specify a custom hostname for the JDBC route. Leave blank to use default hostname, e.g.: secure-<service-name>-<project>.<default-domain-suffix>",
+ "displayName": "Custom JDBC Route Hostname",
+ "name": "HOSTNAME_JDBC",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The URL of the repository with your application source code.",
+ "displayName": "Git Repository URL",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch.",
+ "displayName": "Git Reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "master",
+ "required": false
+ },
+ {
+ "description": "Set this to the relative path to your project if it is not in the root of your repository.",
+ "displayName": "Context Directory",
+ "name": "CONTEXT_DIR",
+ "value": "datavirt/dynamicvdb-datafederation/app",
+ "required": false
+ },
+ {
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow usage of the secret(s) specified by CONFIGURATION_NAME, HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "datavirt-service-account",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore to be used for serving secure content.",
+ "displayName": "Server Keystore Secret Name",
+ "name": "HTTPS_SECRET",
+ "value": "datavirt-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the keystore file within the secret.",
+ "displayName": "Server Keystore Filename",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The type of the keystore file (JKS or JCEKS).",
+ "displayName": "Server Keystore Type",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate.",
+ "displayName": "Server Certificate Name",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "displayName": "Server Keystore Password",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "description": "Username associated with Teiid data service.",
+ "displayName": "Teiid Username",
+ "name": "TEIID_USERNAME",
+ "from": "[\\a]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Password for Teiid user.",
+ "displayName": "Teiid User Password",
+ "name": "TEIID_PASSWORD",
+ "from": "[\\a\\A]{8}[\\d]{1}[\\A]{1}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Username associated with ModeShape.",
+ "displayName": "ModeShape Username",
+ "name": "MODESHAPE_USERNAME",
+ "from": "[\\a]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Password for ModeShape user.",
+ "displayName": "ModeShape User Password",
+ "name": "MODESHAPE_PASSWORD",
+ "from": "[\\a\\A]{8}[\\d]{1}[\\A]{1}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "A secret string used to configure the GitHub webhook.",
+ "displayName": "Github Webhook Secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "A secret string used to configure the Generic webhook.",
+ "displayName": "Generic Webhook Secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "displayName": "ImageStream Namespace",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore to be used for securing JGroups communications.",
+ "displayName": "JGroups Secret Name",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "datavirt-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the JGroups secret.",
+ "displayName": "JGroups Keystore Filename",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the JGroups server certificate",
+ "displayName": "JGroups Certificate Name",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "secret-key",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "displayName": "JGroups Keystore Password",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "password",
+ "required": false
+ },
+ {
+ "description": "Password used by JGroups to authenticate nodes in the cluster.",
+ "displayName": "JGroups Cluster Password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "displayName": "Deploy Exploded Archives",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "name": "http",
+ "port": 8080,
+ "targetPort": "http"
+ },
+ {
+ "name": "https",
+ "port": 8443,
+ "targetPort": "https"
+ },
+ {
+ "name": "jdbc",
+ "port": 31000,
+ "targetPort": "jdbc"
+ },
+ {
+ "name": "jdbcs",
+ "port": 31443,
+ "targetPort": "jdbcs"
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The data virtualization services."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http (REST) service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "port": {
+ "targetPort": "http"
+ },
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https (REST) service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "port": {
+ "targetPort": "https"
+ },
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-jdbc",
+ "metadata": {
+ "name": "jdbc-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's JDBC service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_JDBC}",
+ "port": {
+ "targetPort": "jdbcs"
+ },
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-datavirt63-openshift:1.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "configuration",
+ "mountPath": "/etc/datavirt-environment",
+ "readOnly": true
+ },
+ {
+ "name": "datavirt-keystore-volume",
+ "mountPath": "/etc/datavirt-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "datavirt-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "jdbc",
+ "containerPort": 31000,
+ "protocol": "TCP"
+ },
+ {
+ "name": "jdbcs",
+ "containerPort": 31443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/datavirt-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ },
+ {
+ "name": "TEIID_USERNAME",
+ "value": "${TEIID_USERNAME}"
+ },
+ {
+ "name": "TEIID_PASSWORD",
+ "value": "${TEIID_PASSWORD}"
+ },
+ {
+ "name": "MODESHAPE_USERNAME",
+ "value": "${MODESHAPE_USERNAME}"
+ },
+ {
+ "name": "MODESHAPE_PASSWORD",
+ "value": "${MODESHAPE_PASSWORD}"
+ },
+ {
+ "name": "ENV_FILES",
+ "value": "/etc/datavirt-environment/*"
+ },
+ {
+ "name": "DATAVIRT_TRANSPORT_KEYSTORE",
+ "value": "/etc/datavirt-secret-volume/${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "DATAVIRT_TRANSPORT_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "DATAVIRT_TRANSPORT_KEY_ALIAS",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "DATAVIRT_TRANSPORT_KEYSTORE_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "configuration",
+ "secret": {
+ "secretName": "${CONFIGURATION_NAME}"
+ }
+ },
+ {
+ "name": "datavirt-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "datavirt-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
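Both datavirt63 templates above expect the service account and secrets named in their "message" field to exist before instantiation. A minimal setup sketch using the default parameter values (the datasource properties filename is a placeholder, and exact oc flags can vary between client versions):

    # service account referenced by SERVICE_ACCOUNT_NAME
    oc create serviceaccount datavirt-service-account
    # secret referenced by CONFIGURATION_NAME (datasources.properties is an illustrative filename)
    oc create secret generic datavirt-app-config --from-file=datasources.properties
    # keystores referenced by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET (both default to datavirt-app-secret)
    oc create secret generic datavirt-app-secret --from-file=keystore.jks --from-file=jgroups.jceks
    # instantiate the template; any parameter can be overridden with -p
    oc new-app -f datavirt63-secure-s2i.json -p APPLICATION_NAME=datavirt-app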
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver62-amq-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver62-amq-s2i.json
new file mode 100644
index 000000000..754a3b4c0
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver62-amq-s2i.json
@@ -0,0 +1,686 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BRMS 6.2 decision server A-MQ applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "decisionserver,amq,java,messaging,jboss,xpaas",
+ "version": "1.3.3"
+ },
+ "name": "decisionserver62-amq-s2i"
+ },
+ "labels": {
+ "template": "decisionserver62-amq-s2i",
+ "xpaas": "1.3.3"
+ },
+ "parameters": [
+ {
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "HelloRulesContainer=org.openshift.quickstarts:decisionserver-hellorules:1.2.0.Final",
+ "required": false
+ },
+ {
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "other",
+ "required": false
+ },
+ {
+ "description": "JNDI name of response queue for JMS.",
+ "name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
+ "value": "queue/KIE.SERVER.RESPONSE",
+ "required": false
+ },
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "decisionserver/hellorules",
+ "required": false
+ },
+ {
+ "description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/JmsXA",
+ "name": "MQ_JNDI",
+ "value": "java:/JmsXA",
+ "required": false
+ },
+ {
+ "description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_QUEUES",
+ "value": "KIE.SERVER.REQUEST,KIE.SERVER.RESPONSE",
+ "required": false
+ },
+ {
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "decisionserver-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTPS port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTP service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTPS service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-decisionserver62-openshift:1.2"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "decisionserver-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "decisionserver-keystore-volume",
+ "mountPath": "/etc/decisionserver-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "${KIE_SERVER_DOMAIN}"
+ },
+ {
+ "name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
+ "value": "${KIE_SERVER_JMS_QUEUES_RESPONSE}"
+ },
+ {
+ "name": "MQ_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-amq=MQ"
+ },
+ {
+ "name": "MQ_JNDI",
+ "value": "${MQ_JNDI}"
+ },
+ {
+ "name": "MQ_USERNAME",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "MQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "MQ_PROTOCOL",
+ "value": "tcp"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/decisionserver-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "decisionserver-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-62:1.3"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-62",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp-ssl",
+ "containerPort": 5671,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp-ssl",
+ "containerPort": 61612,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp-ssl",
+ "containerPort": 61617,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
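The KIE_CONTAINER_DEPLOYMENT parameter packs one or more KIE containers into a single string, with containerId=groupId:artifactId:version entries separated by "|". A sketch of instantiating the A-MQ variant with two containers (the second GAV is invented purely for illustration):

    oc new-app -f decisionserver62-amq-s2i.json \
        -p APPLICATION_NAME=kie-app \
        -p KIE_CONTAINER_DEPLOYMENT='HelloRulesContainer=org.openshift.quickstarts:decisionserver-hellorules:1.2.0.Final|MyRulesContainer=com.example:my-rules:1.0.0'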
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver62-basic-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver62-basic-s2i.json
new file mode 100644
index 000000000..8be4ac90b
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver62-basic-s2i.json
@@ -0,0 +1,339 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BRMS 6.2 decision server applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "decisionserver,java,jboss,xpaas",
+ "version": "1.3.3"
+ },
+ "name": "decisionserver62-basic-s2i"
+ },
+ "labels": {
+ "template": "decisionserver62-basic-s2i",
+ "xpaas": "1.3.3"
+ },
+ "parameters": [
+ {
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "HelloRulesContainer=org.openshift.quickstarts:decisionserver-hellorules:1.2.0.Final",
+ "required": false
+ },
+ {
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "decisionserver/hellorules",
+ "required": false
+ },
+ {
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-decisionserver62-openshift:1.2"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver62-https-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver62-https-s2i.json
new file mode 100644
index 000000000..bf9047599
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver62-https-s2i.json
@@ -0,0 +1,473 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BRMS 6.2 decision server HTTPS applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "decisionserver,java,jboss,xpaas",
+ "version": "1.3.3"
+ },
+ "name": "decisionserver62-https-s2i"
+ },
+ "labels": {
+ "template": "decisionserver62-https-s2i",
+ "xpaas": "1.3.3"
+ },
+ "parameters": [
+ {
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "HelloRulesContainer=org.openshift.quickstarts:decisionserver-hellorules:1.2.0.Final",
+ "required": false
+ },
+ {
+ "description": "The protocol to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "https",
+ "required": false
+ },
+ {
+ "description": "The port to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PORT",
+ "value": "8443",
+ "required": false
+ },
+ {
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "decisionserver/hellorules",
+ "required": false
+ },
+ {
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "decisionserver-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-decisionserver62-openshift:1.2"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "decisionserver-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "decisionserver-keystore-volume",
+ "mountPath": "/etc/decisionserver-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "${KIE_SERVER_PROTOCOL}"
+ },
+ {
+ "name": "KIE_SERVER_PORT",
+ "value": "${KIE_SERVER_PORT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/decisionserver-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "decisionserver-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
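The HTTPS variants mount the keystore from the secret named by HTTPS_SECRET and read it using the HTTPS_NAME alias and HTTPS_PASSWORD. A sketch of producing a matching self-signed keystore and the prerequisite objects with the template defaults (the CN below is a placeholder; use the real route hostname):

    keytool -genkeypair -alias jboss -keyalg RSA -keystore keystore.jks \
        -storepass mykeystorepass -keypass mykeystorepass \
        -dname "CN=secure-kie-app.example.com"
    oc create secret generic decisionserver-app-secret --from-file=keystore.jks
    # service account referenced by the DeploymentConfigs above
    oc create serviceaccount decisionserver-service-account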
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver63-amq-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver63-amq-s2i.json
new file mode 100644
index 000000000..51e667e02
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver63-amq-s2i.json
@@ -0,0 +1,696 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BRMS 6.3 decision server A-MQ applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "decisionserver,amq,java,messaging,jboss,xpaas",
+ "version": "1.3.3"
+ },
+ "name": "decisionserver63-amq-s2i"
+ },
+ "labels": {
+ "template": "decisionserver63-amq-s2i",
+ "xpaas": "1.3.3"
+ },
+ "parameters": [
+ {
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "decisionserver-hellorules=org.openshift.quickstarts:decisionserver-hellorules:1.3.0.Final",
+ "required": false
+ },
+ {
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "other",
+ "required": false
+ },
+ {
+ "description": "JNDI name of request queue for JMS.",
+ "name": "KIE_SERVER_JMS_QUEUES_REQUEST",
+ "value": "queue/KIE.SERVER.REQUEST",
+ "required": false
+ },
+ {
+ "description": "JNDI name of response queue for JMS.",
+ "name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
+ "value": "queue/KIE.SERVER.RESPONSE",
+ "required": false
+ },
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "decisionserver/hellorules",
+ "required": false
+ },
+ {
+ "description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/JmsXA",
+ "name": "MQ_JNDI",
+ "value": "java:/JmsXA",
+ "required": false
+ },
+ {
+ "description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_QUEUES",
+ "value": "KIE.SERVER.REQUEST,KIE.SERVER.RESPONSE",
+ "required": false
+ },
+ {
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "decisionserver-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTPS port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTP service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTPS service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-decisionserver63-openshift:1.3"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "decisionserver-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "decisionserver-keystore-volume",
+ "mountPath": "/etc/decisionserver-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "${KIE_SERVER_DOMAIN}"
+ },
+ {
+ "name": "KIE_SERVER_JMS_QUEUES_REQUEST",
+ "value": "${KIE_SERVER_JMS_QUEUES_REQUEST}"
+ },
+ {
+ "name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
+ "value": "${KIE_SERVER_JMS_QUEUES_RESPONSE}"
+ },
+ {
+ "name": "MQ_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-amq=MQ"
+ },
+ {
+ "name": "MQ_JNDI",
+ "value": "${MQ_JNDI}"
+ },
+ {
+ "name": "MQ_USERNAME",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "MQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "MQ_PROTOCOL",
+ "value": "tcp"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/decisionserver-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "decisionserver-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-62:1.3"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-62",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp-ssl",
+ "containerPort": 5671,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp-ssl",
+ "containerPort": 61612,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp-ssl",
+ "containerPort": 61617,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver63-basic-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver63-basic-s2i.json
new file mode 100644
index 000000000..c5f0d006a
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver63-basic-s2i.json
@@ -0,0 +1,339 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BRMS 6.3 decision server applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "decisionserver,java,jboss,xpaas",
+ "version": "1.3.3"
+ },
+ "name": "decisionserver63-basic-s2i"
+ },
+ "labels": {
+ "template": "decisionserver63-basic-s2i",
+ "xpaas": "1.3.3"
+ },
+ "parameters": [
+ {
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "decisionserver-hellorules=org.openshift.quickstarts:decisionserver-hellorules:1.3.0.Final",
+ "required": false
+ },
+ {
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "decisionserver/hellorules",
+ "required": false
+ },
+ {
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-decisionserver63-openshift:1.3"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver63-https-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver63-https-s2i.json
new file mode 100644
index 000000000..3db0e4c84
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/decisionserver63-https-s2i.json
@@ -0,0 +1,473 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BRMS 6.3 decision server HTTPS applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "decisionserver,java,jboss,xpaas",
+ "version": "1.3.3"
+ },
+ "name": "decisionserver63-https-s2i"
+ },
+ "labels": {
+ "template": "decisionserver63-https-s2i",
+ "xpaas": "1.3.3"
+ },
+ "parameters": [
+ {
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "decisionserver-hellorules=org.openshift.quickstarts:decisionserver-hellorules:1.3.0.Final",
+ "required": false
+ },
+ {
+ "description": "The protocol to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "https",
+ "required": false
+ },
+ {
+ "description": "The port to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PORT",
+ "value": "8443",
+ "required": false
+ },
+ {
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "decisionserver/hellorules",
+ "required": false
+ },
+ {
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "decisionserver-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-decisionserver63-openshift:1.3"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "decisionserver-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "decisionserver-keystore-volume",
+ "mountPath": "/etc/decisionserver-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "${KIE_SERVER_PROTOCOL}"
+ },
+ {
+ "name": "KIE_SERVER_PORT",
+ "value": "${KIE_SERVER_PORT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/decisionserver-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "decisionserver-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-amq-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-amq-persistent-s2i.json
new file mode 100644
index 000000000..72dbb4302
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-amq-persistent-s2i.json
@@ -0,0 +1,813 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for EAP 6 A-MQ applications with persistent storage built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,amq,javaee,java,messaging,jboss,xpaas",
+ "version": "1.3.2"
+ },
+ "name": "eap64-amq-persistent-s2i"
+ },
+ "labels": {
+ "template": "eap64-amq-persistent-s2i",
+ "xpaas": "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "6.4.x",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "helloworld-mdb",
+ "required": false
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/ConnectionFactory",
+ "name": "MQ_JNDI",
+ "value": "java:/ConnectionFactory",
+ "required": false
+ },
+ {
+ "description": "Split the data directory for each node in a mesh.",
+ "name": "AMQ_SPLIT",
+ "value": "false",
+ "required": false
+ },
+ {
+ "description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_QUEUES",
+ "value": "HELLOWORLDMDBQueue",
+ "required": false
+ },
+ {
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_TOPICS",
+ "value": "HELLOWORLDMDBTopic",
+ "required": false
+ },
+ {
+ "description": "List of packages that are allowed to be serialized for use in ObjectMessage, separated by commas. If your app doesn't use ObjectMessages, leave this blank. This is a security enforcement. For the rationale, see http://activemq.apache.org/objectmessage.html",
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "eap-service-account",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTPS port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTP service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTPS service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap64-openshift:1.4"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MQ_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-amq=MQ"
+ },
+ {
+ "name": "MQ_JNDI",
+ "value": "${MQ_JNDI}"
+ },
+ {
+ "name": "MQ_USERNAME",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "MQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "MQ_PROTOCOL",
+ "value": "tcp"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "${MQ_SERIALIZABLE_PACKAGES}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Rolling",
+ "rollingParams": {
+ "maxSurge": 0
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-62:1.3"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-62",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp-ssl",
+ "containerPort": 5671,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp-ssl",
+ "containerPort": 61612,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp-ssl",
+ "containerPort": 61617,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/opt/amq/data/kahadb",
+ "name": "${APPLICATION_NAME}-amq-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "AMQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "${MQ_SERIALIZABLE_PACKAGES}"
+ },
+ {
+ "name": "AMQ_SPLIT",
+ "value": "${AMQ_SPLIT}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-amq-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-amq-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-amq-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-amq-s2i.json
new file mode 100644
index 000000000..9dd847451
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-amq-s2i.json
@@ -0,0 +1,760 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for EAP 6 A-MQ applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,amq,javaee,java,messaging,jboss,xpaas",
+ "version": "1.3.2"
+ },
+ "name": "eap64-amq-s2i"
+ },
+ "labels": {
+ "template": "eap64-amq-s2i",
+ "xpaas": "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "6.4.x",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "helloworld-mdb",
+ "required": false
+ },
+ {
+ "description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/ConnectionFactory",
+ "name": "MQ_JNDI",
+ "value": "java:/ConnectionFactory",
+ "required": false
+ },
+ {
+ "description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_QUEUES",
+ "value": "HELLOWORLDMDBQueue",
+ "required": false
+ },
+ {
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_TOPICS",
+ "value": "HELLOWORLDMDBTopic",
+ "required": false
+ },
+ {
+ "description": "List of packages that are allowed to be serialized for use in ObjectMessage, separated by commas. If your app doesn't use ObjectMessages, leave this blank. This is a security enforcement. For the rationale, see http://activemq.apache.org/objectmessage.html",
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "eap-service-account",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTPS port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTP service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTPS service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap64-openshift:1.4"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MQ_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-amq=MQ"
+ },
+ {
+ "name": "MQ_JNDI",
+ "value": "${MQ_JNDI}"
+ },
+ {
+ "name": "MQ_USERNAME",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "MQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "MQ_PROTOCOL",
+ "value": "tcp"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "${MQ_SERIALIZABLE_PACKAGES}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-62:1.3"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-62",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp-ssl",
+ "containerPort": 5671,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp-ssl",
+ "containerPort": 61612,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp-ssl",
+ "containerPort": 61617,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "AMQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "${MQ_SERIALIZABLE_PACKAGES}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-basic-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-basic-s2i.json
new file mode 100644
index 000000000..7b1800b7b
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-basic-s2i.json
@@ -0,0 +1,340 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-jboss",
+ "description": "Application template for EAP 6 applications built using S2I.",
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.3.2"
+ },
+ "name": "eap64-basic-s2i"
+ },
+ "labels": {
+ "template": "eap64-basic-s2i",
+ "xpaas": "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-developer/jboss-eap-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "6.4.x",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "kitchensink",
+ "required": false
+ },
+ {
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap64-openshift:1.4"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-https-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-https-s2i.json
new file mode 100644
index 000000000..31716d84c
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-https-s2i.json
@@ -0,0 +1,525 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-jboss",
+ "description": "Application template for EAP 6 applications built using S2I.",
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.3.2"
+ },
+ "name": "eap64-https-s2i"
+ },
+ "labels": {
+ "template": "eap64-https-s2i",
+ "xpaas": "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-developer/jboss-eap-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "6.4.x",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "kitchensink",
+ "required": false
+ },
+ {
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+            "description": "The name of the service account to use for the deployment. The service account should be configured to allow usage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "eap-service-account",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap64-openshift:1.4"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-mongodb-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-mongodb-persistent-s2i.json
new file mode 100644
index 000000000..212431056
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-mongodb-persistent-s2i.json
@@ -0,0 +1,781 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for EAP 6 MongoDB applications with persistent storage built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,mongodb,javaee,java,database,jboss,xpaas",
+ "version": "1.3.2"
+ },
+ "name": "eap64-mongodb-persistent-s2i"
+ },
+ "labels": {
+ "template": "eap64-mongodb-persistent-s2i",
+ "xpaas": "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-mongodb",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
+ "name": "DB_JNDI",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+            "description": "The name of the service account to use for the deployment. The service account should be configured to allow usage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "eap-service-account",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Disable data file preallocation.",
+ "name": "MONGODB_NOPREALLOC",
+ "required": false
+ },
+ {
+ "description": "Set MongoDB to use a smaller default data file size.",
+ "name": "MONGODB_SMALLFILES",
+ "required": false
+ },
+ {
+ "description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
+ "name": "MONGODB_QUIET",
+ "required": false
+ },
+ {
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database admin password",
+ "name": "DB_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 27017,
+ "targetPort": 27017
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap64-openshift:1.4"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mongodb=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mongodb"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mongodb:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "image": "mongodb",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 27017,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/mongodb/data",
+ "name": "${APPLICATION_NAME}-mongodb-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "MONGODB_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_NOPREALLOC",
+ "value": "${MONGODB_NOPREALLOC}"
+ },
+ {
+ "name": "MONGODB_SMALLFILES",
+ "value": "${MONGODB_SMALLFILES}"
+ },
+ {
+ "name": "MONGODB_QUIET",
+ "value": "${MONGODB_QUIET}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-mongodb-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-mongodb-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-mongodb-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-mongodb-s2i.json
new file mode 100644
index 000000000..13fbbdd93
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-mongodb-s2i.json
@@ -0,0 +1,741 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for EAP 6 MongoDB applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,mongodb,javaee,java,database,jboss,xpaas",
+ "version": "1.3.2"
+ },
+ "name": "eap64-mongodb-s2i"
+ },
+ "labels": {
+ "template": "eap64-mongodb-s2i",
+ "xpaas": "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-mongodb",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
+ "name": "DB_JNDI",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+            "description": "The name of the service account to use for the deployment. The service account should be configured to allow usage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "eap-service-account",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Disable data file preallocation.",
+ "name": "MONGODB_NOPREALLOC",
+ "required": false
+ },
+ {
+ "description": "Set MongoDB to use a smaller default data file size.",
+ "name": "MONGODB_SMALLFILES",
+ "required": false
+ },
+ {
+ "description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
+ "name": "MONGODB_QUIET",
+ "required": false
+ },
+ {
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database admin password",
+ "name": "DB_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 27017,
+ "targetPort": 27017
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap64-openshift:1.4"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mongodb=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mongodb"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mongodb:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "image": "mongodb",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 27017,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MONGODB_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_NOPREALLOC",
+ "value": "${MONGODB_NOPREALLOC}"
+ },
+ {
+ "name": "MONGODB_SMALLFILES",
+ "value": "${MONGODB_SMALLFILES}"
+ },
+ {
+ "name": "MONGODB_QUIET",
+ "value": "${MONGODB_QUIET}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-mysql-persistent-s2i.json
new file mode 100644
index 000000000..69fdec206
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-mysql-persistent-s2i.json
@@ -0,0 +1,792 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for EAP 6 MySQL applications with persistent storage built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,mysql,javaee,java,database,jboss,xpaas",
+ "version": "1.3.2"
+ },
+ "name": "eap64-mysql-persistent-s2i"
+ },
+ "labels": {
+ "template": "eap64-mysql-persistent-s2i",
+ "xpaas": "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mysql",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/TodoListDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "eap-service-account",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap64-openshift:1.4"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "TIMER_SERVICE_DATA_STORE",
+ "value": "${APPLICATION_NAME}-mysql"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/mysql/data",
+ "name": "${APPLICATION_NAME}-mysql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-mysql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-mysql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
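To illustrate how the parameters above are consumed, eap64-mysql-persistent-s2i.json can be rendered with oc process, overriding only the values of interest; anything left unset falls back to its default or to its generate/expression rule (DB_PASSWORD, the webhook secrets, the HornetQ and JGroups passwords). The overrides shown are placeholders, and the command assumes the JSON file sits in the current directory:

    # Render the template locally and create the resulting objects in the current project.
    oc process -f eap64-mysql-persistent-s2i.json \
        -p APPLICATION_NAME=eap-app \
        -p DB_DATABASE=todolist \
        -p VOLUME_CAPACITY=1Gi \
        | oc create -f -
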
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-mysql-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-mysql-s2i.json
new file mode 100644
index 000000000..2bd3c249f
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-mysql-s2i.json
@@ -0,0 +1,752 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for EAP 6 MySQL applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,mysql,javaee,java,database,jboss,xpaas",
+ "version": "1.3.2"
+ },
+ "name": "eap64-mysql-s2i"
+ },
+ "labels": {
+ "template": "eap64-mysql-s2i",
+ "xpaas": "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mysql",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/TodoListDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "eap-service-account",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap64-openshift:1.4"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "TIMER_SERVICE_DATA_STORE",
+ "value": "${APPLICATION_NAME}-mysql"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
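eap64-mysql-s2i.json is the non-persistent counterpart of the previous template: the MySQL deployment config defines no volumes or PersistentVolumeClaim, so database contents do not survive a rescheduling of the MySQL pod. Assuming the template has already been imported into the cluster (these example files are normally loaded into the openshift namespace), it can also be instantiated by name with oc new-app; the repository URL below simply repeats the template default:

    # e.g. import once with: oc create -n openshift -f eap64-mysql-s2i.json
    oc new-app --template=eap64-mysql-s2i \
        -p APPLICATION_NAME=eap-app \
        -p SOURCE_REPOSITORY_URL=https://github.com/jboss-openshift/openshift-quickstarts \
        -p CONTEXT_DIR=todolist/todolist-jdbc
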
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-postgresql-persistent-s2i.json
new file mode 100644
index 000000000..31f245950
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-postgresql-persistent-s2i.json
@@ -0,0 +1,769 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for EAP 6 PostgreSQL applications with persistent storage built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,postgresql,javaee,java,database,jboss,xpaas",
+ "version": "1.3.2"
+ },
+ "name": "eap64-postgresql-persistent-s2i"
+ },
+ "labels": {
+ "template": "eap64-postgresql-persistent-s2i",
+ "xpaas": "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/TodoListDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "eap-service-account",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap64-openshift:1.4"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "TIMER_SERVICE_DATA_STORE",
+ "value": "${APPLICATION_NAME}-postgresql"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/pgsql/data",
+ "name": "${APPLICATION_NAME}-postgresql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_PREPARED_TRANSACTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-postgresql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
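For the PostgreSQL variants it can be handy to list the parameters before instantiating anything; oc process --parameters prints each entry's name, description, generator and default, which makes it easy to see which values (DB_PASSWORD, GITHUB_WEBHOOK_SECRET, JGROUPS_CLUSTER_PASSWORD, and so on) will be generated from the [a-zA-Z0-9]{8} expressions above. A sketch, again assuming the JSON file is in the current directory:

    # Inspect the parameters without creating anything.
    oc process -f eap64-postgresql-persistent-s2i.json --parameters

    # Instantiate with a larger database volume than the 512Mi default.
    oc process -f eap64-postgresql-persistent-s2i.json \
        -p APPLICATION_NAME=eap-app \
        -p VOLUME_CAPACITY=2Gi \
        | oc create -f -
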
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-postgresql-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-postgresql-s2i.json
new file mode 100644
index 000000000..eac964697
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-postgresql-s2i.json
@@ -0,0 +1,729 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for EAP 6 PostgreSQL applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,postgresql,javaee,java,database,jboss,xpaas",
+ "version": "1.3.2"
+ },
+ "name": "eap64-postgresql-s2i"
+ },
+ "labels": {
+ "template": "eap64-postgresql-s2i",
+ "xpaas": "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/TodoListDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "eap-service-account",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap64-openshift:1.4"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "TIMER_SERVICE_DATA_STORE",
+ "value": "${APPLICATION_NAME}-postgresql"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_PREPARED_TRANSACTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
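A minimal usage sketch for the EAP 6.4 + PostgreSQL template above, assuming an oc client logged in to a project; the file name below is only a placeholder for the template file added by this hunk, and eap-app mirrors the template's own APPLICATION_NAME default:

    # Process the template locally and create the resulting objects; parameters
    # defined with "generate": "expression" (passwords, webhook secrets) are
    # filled in automatically by oc process.
    oc process -f eap64-postgresql-s2i.json -p APPLICATION_NAME=eap-app | oc create -f -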
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-sso-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-sso-s2i.json
new file mode 100644
index 000000000..09023be71
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap64-sso-s2i.json
@@ -0,0 +1,756 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass" : "icon-jboss",
+ "description": "Application template for EAP 6 applications built using S2I, enabled for SSO.",
+ "tags": "eap,javaee,java,jboss,xpaas,sso,keycloak",
+ "version": "1.3.2"
+ },
+ "name": "eap64-sso-s2i"
+ },
+ "labels": {
+ "template": "eap64-sso-s2i",
+ "xpaas": "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "description": "Hostname for http service route (e.g. eap-app-myproject.example.com). Required for SSO-enabled applications. This is added to the white list of redirects in the SSO server.",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": true
+ },
+ {
+ "description": "Hostname for https service route (e.g. secure-eap-app-myproject.example.com). Required for SSO-enabled applications. This is added to the white list of redirects in the SSO server.",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": true
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/redhat-developer/redhat-sso-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "7.0.x-ose",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+            "description": "The name of the service account to use for the deployment. The service account should be configured to allow usage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "eap-service-account",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate (e.g. jboss)",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate (e.g. mykeystorepass)",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate (e.g. secret-key)",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate (e.g. password)",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ },
+ {
+ "description": "The URL for the SSO server (e.g. https://secure-sso-myproject.example.com/auth). This is the URL through which the user will be redirected when a login or token is required by the application.",
+ "name": "SSO_URL",
+ "value": "",
+ "required": true
+ },
+ {
+            "description": "The URL for the internal SSO service, where secure-sso is the Kubernetes service exposed by the SSO server. This is used to create the application client(s) (see SSO_USERNAME). This can also be the same as SSO_URL.",
+ "name": "SSO_SERVICE_URL",
+ "value": "https://secure-sso:8443/auth",
+ "required": false
+ },
+ {
+ "description": "The SSO realm to which the application client(s) should be associated (e.g. demo).",
+ "name": "SSO_REALM",
+ "value": "",
+ "required": true
+ },
+ {
+            "description": "The username used to access the SSO service. This is used to create the application client(s) within the specified SSO realm. This should match the SSO_SERVICE_USERNAME specified through one of the sso70-* templates.",
+ "name": "SSO_USERNAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the SSO service user.",
+ "name": "SSO_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "SSO Public Key. Public key is recommended to be passed into the template to avoid man-in-the-middle security vulnerability. This can be retrieved from the SSO server, for the specified realm.",
+ "name": "SSO_PUBLIC_KEY",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "SSO Client Access Type",
+ "name": "SSO_BEARER_ONLY",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "app-jee-jsp/target,service-jee-jaxrs/target,app-profile-jee-jsp/target,app-profile-saml-jee-jsp/target",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "SSO_SAML_KEYSTORE_SECRET",
+ "value": "eap-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "SSO_SAML_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "SSO_SAML_CERTIFICATE_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "SSO_SAML_KEYSTORE_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "description": "The SSO Client Secret for Confidential Access",
+ "name": "SSO_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Enable CORS for SSO applications",
+ "name": "SSO_ENABLE_CORS",
+ "value": "false",
+ "required": false
+ },
+ {
+ "description": "SSO logout page for SAML applications",
+ "name": "SSO_SAML_LOGOUT_PAGE",
+ "value": "/",
+ "required": false
+ },
+ {
+ "description": "If true SSL communication between EAP and the SSO Server will be insecure (i.e. certificate validation is disabled with curl)",
+ "name": "SSO_DISABLE_SSL_CERTIFICATE_VALIDATION",
+ "value": "true",
+ "required": false
+ },
+ {
+ "description": "The name of the truststore file within the secret (e.g. truststore.jks)",
+ "name": "SSO_TRUSTSTORE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the truststore and certificate (e.g. mykeystorepass)",
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the truststore file (e.g. truststore-secret). Used for volume secretName",
+ "name": "SSO_TRUSTSTORE_SECRET",
+ "value": "eap-app-secret",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap64-openshift:1.4"
+ },
+ "env": [
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": ""
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "sso-saml-keystore-volume",
+ "mountPath": "/etc/sso-saml-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "sso-truststore-volume",
+ "mountPath": "/etc/sso-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HOSTNAME_HTTP",
+ "value": "${HOSTNAME_HTTP}"
+ },
+ {
+ "name": "HOSTNAME_HTTPS",
+ "value": "${HOSTNAME_HTTPS}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ },
+ {
+ "name": "SSO_URL",
+ "value": "${SSO_URL}"
+ },
+ {
+ "name": "SSO_SERVICE_URL",
+ "value": "${SSO_SERVICE_URL}"
+ },
+ {
+ "name": "SSO_REALM",
+ "value": "${SSO_REALM}"
+ },
+ {
+ "name": "SSO_USERNAME",
+ "value": "${SSO_USERNAME}"
+ },
+ {
+ "name": "SSO_PASSWORD",
+ "value": "${SSO_PASSWORD}"
+ },
+ {
+ "name": "SSO_PUBLIC_KEY",
+ "value": "${SSO_PUBLIC_KEY}"
+ },
+ {
+ "name": "SSO_BEARER_ONLY",
+ "value": "${SSO_BEARER_ONLY}"
+ },
+ {
+ "name": "SSO_SAML_KEYSTORE_SECRET",
+ "value": "${SSO_SAML_KEYSTORE_SECRET}"
+ },
+ {
+ "name": "SSO_SAML_KEYSTORE",
+ "value": "${SSO_SAML_KEYSTORE}"
+ },
+ {
+ "name": "SSO_SAML_KEYSTORE_DIR",
+ "value": "/etc/sso-saml-secret-volume"
+ },
+ {
+ "name": "SSO_SAML_CERTIFICATE_NAME",
+ "value": "${SSO_SAML_CERTIFICATE_NAME}"
+ },
+ {
+ "name": "SSO_SAML_KEYSTORE_PASSWORD",
+ "value": "${SSO_SAML_KEYSTORE_PASSWORD}"
+ },
+ {
+ "name": "SSO_SECRET",
+ "value": "${SSO_SECRET}"
+ },
+ {
+ "name": "SSO_ENABLE_CORS",
+ "value": "${SSO_ENABLE_CORS}"
+ },
+ {
+ "name": "SSO_SAML_LOGOUT_PAGE",
+ "value": "${SSO_SAML_LOGOUT_PAGE}"
+ },
+ {
+ "name": "SSO_DISABLE_SSL_CERTIFICATE_VALIDATION",
+ "value": "${SSO_DISABLE_SSL_CERTIFICATE_VALIDATION}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE",
+ "value": "${SSO_TRUSTSTORE}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_DIR",
+ "value": "/etc/sso-secret-volume"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "${SSO_TRUSTSTORE_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "sso-saml-keystore-volume",
+ "secret": {
+ "secretName": "${SSO_SAML_KEYSTORE_SECRET}"
+ }
+ },
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ },
+ {
+ "name": "sso-truststore-volume",
+ "secret": {
+ "secretName": "${SSO_TRUSTSTORE_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
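A usage sketch for eap64-sso-s2i, assuming the template has already been created in the current project (or the openshift namespace): the parameters marked required that ship with an empty value are HOSTNAME_HTTP, HOSTNAME_HTTPS, SSO_URL and SSO_REALM, so those are the ones that must be supplied; the host names and realm below are illustrative values taken from the parameter descriptions.

    oc new-app --template=eap64-sso-s2i \
        -p HOSTNAME_HTTP=eap-app-myproject.example.com \
        -p HOSTNAME_HTTPS=secure-eap-app-myproject.example.com \
        -p SSO_URL=https://secure-sso-myproject.example.com/auth \
        -p SSO_REALM=demo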
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-amq-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-amq-persistent-s2i.json
new file mode 100644
index 000000000..f08cdf2f9
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-amq-persistent-s2i.json
@@ -0,0 +1,813 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for EAP 7 A-MQ applications with persistent storage built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,amq,javaee,java,messaging,jboss,xpaas",
+ "version": "1.3.2"
+ },
+ "name": "eap70-amq-persistent-s2i"
+ },
+ "labels": {
+ "template": "eap70-amq-persistent-s2i",
+ "xpaas": "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "helloworld-mdb",
+ "required": false
+ },
+ {
+            "description": "Size of persistent storage for the A-MQ broker's data volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/ConnectionFactory",
+ "name": "MQ_JNDI",
+ "value": "java:/ConnectionFactory",
+ "required": false
+ },
+ {
+ "description": "Split the data directory for each node in a mesh.",
+ "name": "AMQ_SPLIT",
+ "value": "false",
+ "required": false
+ },
+ {
+ "description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_QUEUES",
+ "value": "HELLOWORLDMDBQueue",
+ "required": false
+ },
+ {
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_TOPICS",
+ "value": "HELLOWORLDMDBTopic",
+ "required": false
+ },
+ {
+ "description": "List of packages that are allowed to be serialized for use in ObjectMessage, separated by commas. If your app doesn't use ObjectMessages, leave this blank. This is a security enforcement. For the rationale, see http://activemq.apache.org/objectmessage.html",
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "",
+ "required": false
+ },
+ {
+            "description": "The name of the service account to use for the deployment. The service account should be configured to allow usage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "eap7-service-account",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap7-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap7-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTPS port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTP service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTPS service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap70-openshift:1.4"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MQ_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-amq=MQ"
+ },
+ {
+ "name": "MQ_JNDI",
+ "value": "${MQ_JNDI}"
+ },
+ {
+ "name": "MQ_USERNAME",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "MQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "MQ_PROTOCOL",
+ "value": "tcp"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "${MQ_SERIALIZABLE_PACKAGES}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Rolling",
+ "rollingParams": {
+ "maxSurge": 0
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-62:1.3"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-62",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp-ssl",
+ "containerPort": 5671,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp-ssl",
+ "containerPort": 61612,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp-ssl",
+ "containerPort": 61617,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/opt/amq/data/kahadb",
+ "name": "${APPLICATION_NAME}-amq-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "AMQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "${MQ_SERIALIZABLE_PACKAGES}"
+ },
+ {
+ "name": "AMQ_SPLIT",
+ "value": "${AMQ_SPLIT}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-amq-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-amq-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
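The persistent variant above also creates a PersistentVolumeClaim named ${APPLICATION_NAME}-amq-claim, sized by VOLUME_CAPACITY and mounted at /opt/amq/data/kahadb in the broker container. A quick sketch of instantiating it and checking that the claim binds; the application name and size here are illustrative:

    oc new-app --template=eap70-amq-persistent-s2i \
        -p APPLICATION_NAME=eap-app -p VOLUME_CAPACITY=1Gi
    oc get pvc eap-app-amq-claim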
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-amq-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-amq-s2i.json
new file mode 100644
index 000000000..3ca9e9fab
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-amq-s2i.json
@@ -0,0 +1,760 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for EAP 7 A-MQ applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,amq,javaee,java,messaging,jboss,xpaas",
+ "version": "1.3.2"
+ },
+ "name": "eap70-amq-s2i"
+ },
+ "labels": {
+ "template": "eap70-amq-s2i",
+ "xpaas": "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "helloworld-mdb",
+ "required": false
+ },
+ {
+ "description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/ConnectionFactory",
+ "name": "MQ_JNDI",
+ "value": "java:/ConnectionFactory",
+ "required": false
+ },
+ {
+ "description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_QUEUES",
+ "value": "HELLOWORLDMDBQueue",
+ "required": false
+ },
+ {
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_TOPICS",
+ "value": "HELLOWORLDMDBTopic",
+ "required": false
+ },
+ {
+ "description": "List of packages that are allowed to be serialized for use in ObjectMessage, separated by commas. If your app doesn't use ObjectMessages, leave this blank. This is a security enforcement. For the rationale, see http://activemq.apache.org/objectmessage.html",
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "",
+ "required": false
+ },
+ {
+            "description": "The name of the service account to use for the deployment. The service account should be configured to allow usage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "eap7-service-account",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap7-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap7-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTPS port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTP service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTPS service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap70-openshift:1.4"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MQ_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-amq=MQ"
+ },
+ {
+ "name": "MQ_JNDI",
+ "value": "${MQ_JNDI}"
+ },
+ {
+ "name": "MQ_USERNAME",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "MQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "MQ_PROTOCOL",
+ "value": "tcp"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "${MQ_SERIALIZABLE_PACKAGES}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-62:1.3"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-62",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp-ssl",
+ "containerPort": 5671,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp-ssl",
+ "containerPort": 61612,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp-ssl",
+ "containerPort": 61617,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "AMQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "${MQ_SERIALIZABLE_PACKAGES}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
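Unlike the persistent variant, the eap70-amq-s2i broker DeploymentConfig above defines no volumes, so broker data does not survive pod restarts; the queues and topics listed in MQ_QUEUES and MQ_TOPICS are created when the broker starts. An illustrative instantiation overriding those defaults with single queue and topic names:

    oc new-app --template=eap70-amq-s2i \
        -p MQ_QUEUES=ORDERS -p MQ_TOPICS=ORDEREVENTS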
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-basic-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-basic-s2i.json
new file mode 100644
index 000000000..83b4d5b24
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-basic-s2i.json
@@ -0,0 +1,351 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-jboss",
+ "description": "Application template for EAP 7 applications built using S2I.",
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.3.2"
+ },
+ "name": "eap70-basic-s2i"
+ },
+ "labels": {
+ "template": "eap70-basic-s2i",
+ "xpaas": "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-developer/jboss-eap-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "7.0.0.GA",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "kitchensink",
+ "required": false
+ },
+ {
+ "description": "Queue names",
+ "name": "MQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "A-MQ cluster admin password",
+ "name": "MQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap70-openshift:1.4"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "lifecycle": {
+ "preStop": {
+ "exec": {
+ "command": [
+ "/opt/eap/bin/jboss-cli.sh",
+ "-c",
+ ":shutdown(timeout=60)"
+ ]
+ }
+ }
+ },
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "MQ_CLUSTER_PASSWORD",
+ "value": "${MQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-https-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-https-s2i.json
new file mode 100644
index 000000000..1292442a4
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-https-s2i.json
@@ -0,0 +1,536 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-jboss",
+ "description": "Application template for EAP 7 applications built using S2I.",
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.3.2"
+ },
+ "name": "eap70-https-s2i"
+ },
+ "labels": {
+ "template": "eap70-https-s2i",
+ "xpaas": "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-developer/jboss-eap-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "7.0.0.GA",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "kitchensink",
+ "required": false
+ },
+ {
+ "description": "Queue names",
+ "name": "MQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "eap7-service-account",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap7-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "A-MQ cluster admin password",
+ "name": "MQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap7-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap70-openshift:1.4"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "lifecycle": {
+ "preStop": {
+ "exec": {
+ "command": [
+ "/opt/eap/bin/jboss-cli.sh",
+ "-c",
+ ":shutdown(timeout=60)"
+ ]
+ }
+ }
+ },
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "MQ_CLUSTER_PASSWORD",
+ "value": "${MQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-mongodb-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-mongodb-persistent-s2i.json
new file mode 100644
index 000000000..99db77d58
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-mongodb-persistent-s2i.json
@@ -0,0 +1,792 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for EAP 7 MongoDB applications with persistent storage built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,mongodb,javaee,java,database,jboss,xpaas",
+ "version": "1.3.2"
+ },
+ "name": "eap70-mongodb-persistent-s2i"
+ },
+ "labels": {
+ "template": "eap70-mongodb-persistent-s2i",
+ "xpaas": "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-mongodb",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
+ "name": "DB_JNDI",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "Queue names",
+ "name": "MQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "eap7-service-account",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap7-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Disable data file preallocation.",
+ "name": "MONGODB_NOPREALLOC",
+ "required": false
+ },
+ {
+ "description": "Set MongoDB to use a smaller default data file size.",
+ "name": "MONGODB_SMALLFILES",
+ "required": false
+ },
+ {
+ "description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
+ "name": "MONGODB_QUIET",
+ "required": false
+ },
+ {
+ "description": "A-MQ cluster admin password",
+ "name": "MQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database admin password",
+ "name": "DB_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap7-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 27017,
+ "targetPort": 27017
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap70-openshift:1.4"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "lifecycle": {
+ "preStop": {
+ "exec": {
+ "command": [
+ "/opt/eap/bin/jboss-cli.sh",
+ "-c",
+ ":shutdown(timeout=60)"
+ ]
+ }
+ }
+ },
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mongodb=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "MQ_CLUSTER_PASSWORD",
+ "value": "${MQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mongodb"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mongodb:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "image": "mongodb",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 27017,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/mongodb/data",
+ "name": "${APPLICATION_NAME}-mongodb-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "MONGODB_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_NOPREALLOC",
+ "value": "${MONGODB_NOPREALLOC}"
+ },
+ {
+ "name": "MONGODB_SMALLFILES",
+ "value": "${MONGODB_SMALLFILES}"
+ },
+ {
+ "name": "MONGODB_QUIET",
+ "value": "${MONGODB_QUIET}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-mongodb-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-mongodb-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-mongodb-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-mongodb-s2i.json
new file mode 100644
index 000000000..c8150c231
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-mongodb-s2i.json
@@ -0,0 +1,752 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for EAP 7 MongoDB applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,mongodb,javaee,java,database,jboss,xpaas",
+ "version": "1.3.2"
+ },
+ "name": "eap70-mongodb-s2i"
+ },
+ "labels": {
+ "template": "eap70-mongodb-s2i",
+ "xpaas": "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-mongodb",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
+ "name": "DB_JNDI",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Queue names",
+ "name": "MQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "eap7-service-account",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap7-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Disable data file preallocation.",
+ "name": "MONGODB_NOPREALLOC",
+ "required": false
+ },
+ {
+ "description": "Set MongoDB to use a smaller default data file size.",
+ "name": "MONGODB_SMALLFILES",
+ "required": false
+ },
+ {
+ "description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
+ "name": "MONGODB_QUIET",
+ "required": false
+ },
+ {
+ "description": "A-MQ cluster admin password",
+ "name": "MQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database admin password",
+ "name": "DB_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap7-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 27017,
+ "targetPort": 27017
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap70-openshift:1.4"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "lifecycle": {
+ "preStop": {
+ "exec": {
+ "command": [
+ "/opt/eap/bin/jboss-cli.sh",
+ "-c",
+ ":shutdown(timeout=60)"
+ ]
+ }
+ }
+ },
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mongodb=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "MQ_CLUSTER_PASSWORD",
+ "value": "${MQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mongodb"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mongodb:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "image": "mongodb",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 27017,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MONGODB_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_NOPREALLOC",
+ "value": "${MONGODB_NOPREALLOC}"
+ },
+ {
+ "name": "MONGODB_SMALLFILES",
+ "value": "${MONGODB_SMALLFILES}"
+ },
+ {
+ "name": "MONGODB_QUIET",
+ "value": "${MONGODB_QUIET}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-mysql-persistent-s2i.json
new file mode 100644
index 000000000..f8e5c2b04
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-mysql-persistent-s2i.json
@@ -0,0 +1,807 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for EAP 7 MySQL applications with persistent storage built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,mysql,javaee,java,database,jboss,xpaas",
+ "version": "1.3.2"
+ },
+ "name": "eap70-mysql-persistent-s2i"
+ },
+ "labels": {
+ "template": "eap70-mysql-persistent-s2i",
+ "xpaas": "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mysql",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/TodoListDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "Queue names",
+ "name": "MQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "eap7-service-account",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap7-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "description": "A-MQ cluster admin password",
+ "name": "MQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap7-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap70-openshift:1.4"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "lifecycle": {
+ "preStop": {
+ "exec": {
+ "command": [
+ "/opt/eap/bin/jboss-cli.sh",
+ "-c",
+ ":shutdown(timeout=60)"
+ ]
+ }
+ }
+ },
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "MQ_CLUSTER_PASSWORD",
+ "value": "${MQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ },
+ {
+ "name": "DEFAULT_JOB_REPOSITORY",
+ "value": "${APPLICATION_NAME}-mysql"
+ },
+ {
+ "name": "TIMER_SERVICE_DATA_STORE",
+ "value": "${APPLICATION_NAME}-mysql"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/mysql/data",
+ "name": "${APPLICATION_NAME}-mysql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-mysql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-mysql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-mysql-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-mysql-s2i.json
new file mode 100644
index 000000000..1edeb62e7
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-mysql-s2i.json
@@ -0,0 +1,767 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for EAP 7 MySQL applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,mysql,javaee,java,database,jboss,xpaas",
+ "version": "1.3.2"
+ },
+ "name": "eap70-mysql-s2i"
+ },
+ "labels": {
+ "template": "eap70-mysql-s2i",
+ "xpaas": "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mysql",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/TodoListDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Queue names",
+ "name": "MQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "eap7-service-account",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap7-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "description": "A-MQ cluster admin password",
+ "name": "MQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap7-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap70-openshift:1.4"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "lifecycle": {
+ "preStop": {
+ "exec": {
+ "command": [
+ "/opt/eap/bin/jboss-cli.sh",
+ "-c",
+ ":shutdown(timeout=60)"
+ ]
+ }
+ }
+ },
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "MQ_CLUSTER_PASSWORD",
+ "value": "${MQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ },
+ {
+ "name": "DEFAULT_JOB_REPOSITORY",
+ "value": "${APPLICATION_NAME}-mysql"
+ },
+ {
+ "name": "TIMER_SERVICE_DATA_STORE",
+ "value": "${APPLICATION_NAME}-mysql"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-postgresql-persistent-s2i.json
new file mode 100644
index 000000000..d11df06ee
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-postgresql-persistent-s2i.json
@@ -0,0 +1,784 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for EAP 7 PostgreSQL applications with persistent storage built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,postgresql,javaee,java,database,jboss,xpaas",
+ "version": "1.3.2"
+ },
+ "name": "eap70-postgresql-persistent-s2i"
+ },
+ "labels": {
+ "template": "eap70-postgresql-persistent-s2i",
+ "xpaas": "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/TodoListDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "Queue names",
+ "name": "MQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "eap7-service-account",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap7-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "description": "A-MQ cluster admin password",
+ "name": "MQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap7-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap70-openshift:1.4"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "lifecycle": {
+ "preStop": {
+ "exec": {
+ "command": [
+ "/opt/eap/bin/jboss-cli.sh",
+ "-c",
+ ":shutdown(timeout=60)"
+ ]
+ }
+ }
+ },
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "MQ_CLUSTER_PASSWORD",
+ "value": "${MQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ },
+ {
+ "name": "DEFAULT_JOB_REPOSITORY",
+ "value": "${APPLICATION_NAME}-postgresql"
+ },
+ {
+ "name": "TIMER_SERVICE_DATA_STORE",
+ "value": "${APPLICATION_NAME}-postgresql"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/pgsql/data",
+ "name": "${APPLICATION_NAME}-postgresql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_PREPARED_TRANSACTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-postgresql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-postgresql-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-postgresql-s2i.json
new file mode 100644
index 000000000..6b7f6d707
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-postgresql-s2i.json
@@ -0,0 +1,744 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for EAP 7 PostgreSQL applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,postgresql,javaee,java,database,jboss,xpaas",
+ "version": "1.3.2"
+ },
+ "name": "eap70-postgresql-s2i"
+ },
+ "labels": {
+ "template": "eap70-postgresql-s2i",
+ "xpaas": "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/TodoListDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Queue names",
+ "name": "MQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "eap7-service-account",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap7-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "description": "A-MQ cluster admin password",
+ "name": "MQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap7-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap70-openshift:1.4"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "lifecycle": {
+ "preStop": {
+ "exec": {
+ "command": [
+ "/opt/eap/bin/jboss-cli.sh",
+ "-c",
+ ":shutdown(timeout=60)"
+ ]
+ }
+ }
+ },
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "MQ_CLUSTER_PASSWORD",
+ "value": "${MQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ },
+ {
+ "name": "DEFAULT_JOB_REPOSITORY",
+ "value": "${APPLICATION_NAME}-postgresql"
+ },
+ {
+ "name": "TIMER_SERVICE_DATA_STORE",
+ "value": "${APPLICATION_NAME}-postgresql"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_PREPARED_TRANSACTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-sso-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-sso-s2i.json
new file mode 100644
index 000000000..811602220
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/eap70-sso-s2i.json
@@ -0,0 +1,767 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass" : "icon-jboss",
+ "description": "Application template for EAP 6 applications built using S2I, enabled for SSO.",
+ "tags": "eap,javaee,java,jboss,xpaas,sso,keycloak",
+ "version": "1.3.2"
+ },
+ "name": "eap70-sso-s2i"
+ },
+ "labels": {
+ "template": "eap70-sso-s2i",
+ "xpaas": "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "description": "Hostname for http service route (e.g. eap-app-myproject.example.com). Required for SSO-enabled applications. This is added to the white list of redirects in the SSO server.",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": true
+ },
+ {
+ "description": "Hostname for https service route (e.g. secure-eap-app-myproject.example.com). Required for SSO-enabled applications. This is added to the white list of redirects in the SSO server.",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": true
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/redhat-developer/redhat-sso-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "7.0.x-ose",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "eap7-service-account",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap7-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate (e.g. jboss)",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate (e.g. mykeystorepass)",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap7-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate (e.g. secret-key)",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate (e.g. password)",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ },
+ {
+ "description": "The URL for the SSO server (e.g. https://secure-sso-myproject.example.com/auth). This is the URL through which the user will be redirected when a login or token is required by the application.",
+ "name": "SSO_URL",
+ "value": "",
+ "required": true
+ },
+ {
+ "description": "The URL for the interal SSO service, where secure-sso (the default) is the kubernetes service exposed by the SSO server. This is used to create the application client(s) (see SSO_USERNAME). This can also be the same as SSO_URL.",
+ "name": "SSO_SERVICE_URL",
+ "value": "https://secure-sso:8443/auth",
+ "required": false
+ },
+ {
+ "description": "The SSO realm to which the application client(s) should be associated (e.g. demo).",
+ "name": "SSO_REALM",
+ "value": "",
+ "required": true
+ },
+ {
+ "description": "The username used to access the SSO service. This is used to create the appliction client(s) within the specified SSO realm. This should match the SSO_SERVICE_USERNAME specified through one of the sso70-* templates.",
+ "name": "SSO_USERNAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the SSO service user.",
+ "name": "SSO_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "SSO Public Key. Public key is recommended to be passed into the template to avoid man-in-the-middle security vulnerability",
+ "name": "SSO_PUBLIC_KEY",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "SSO Client Access Type",
+ "name": "SSO_BEARER_ONLY",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "app-jee-jsp/target,service-jee-jaxrs/target,app-profile-jee-jsp/target,app-profile-saml-jee-jsp/target",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "SSO_SAML_KEYSTORE_SECRET",
+ "value": "eap7-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "SSO_SAML_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "SSO_SAML_CERTIFICATE_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "SSO_SAML_KEYSTORE_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "description": "The SSO Client Secret for Confidential Access",
+ "name": "SSO_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Enable CORS for SSO applications",
+ "name": "SSO_ENABLE_CORS",
+ "value": "false",
+ "required": false
+ },
+ {
+ "description": "SSO logout page for SAML applications",
+ "name": "SSO_SAML_LOGOUT_PAGE",
+ "value": "/",
+ "required": false
+ },
+ {
+ "description": "If true SSL communication between EAP and the SSO Server will be insecure (i.e. certificate validation is disabled with curl)",
+ "name": "SSO_DISABLE_SSL_CERTIFICATE_VALIDATION",
+ "value": "true",
+ "required": false
+ },
+ {
+ "description": "The name of the truststore file within the secret (e.g. truststore.jks)",
+ "name": "SSO_TRUSTSTORE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the truststore and certificate (e.g. mykeystorepass)",
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the truststore file (e.g. truststore-secret). Used for volume secretName",
+ "name": "SSO_TRUSTSTORE_SECRET",
+ "value": "eap7-app-secret",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap70-openshift:1.4"
+ },
+ "env": [
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": ""
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "sso-saml-keystore-volume",
+ "mountPath": "/etc/sso-saml-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "sso-truststore-volume",
+ "mountPath": "/etc/sso-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "lifecycle": {
+ "preStop": {
+ "exec": {
+ "command": [
+ "/opt/eap/bin/jboss-cli.sh",
+ "-c",
+ ":shutdown(timeout=60)"
+ ]
+ }
+ }
+ },
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HOSTNAME_HTTP",
+ "value": "${HOSTNAME_HTTP}"
+ },
+ {
+ "name": "HOSTNAME_HTTPS",
+ "value": "${HOSTNAME_HTTPS}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ },
+ {
+ "name": "SSO_URL",
+ "value": "${SSO_URL}"
+ },
+ {
+ "name": "SSO_SERVICE_URL",
+ "value": "${SSO_SERVICE_URL}"
+ },
+ {
+ "name": "SSO_REALM",
+ "value": "${SSO_REALM}"
+ },
+ {
+ "name": "SSO_USERNAME",
+ "value": "${SSO_USERNAME}"
+ },
+ {
+ "name": "SSO_PASSWORD",
+ "value": "${SSO_PASSWORD}"
+ },
+ {
+ "name": "SSO_PUBLIC_KEY",
+ "value": "${SSO_PUBLIC_KEY}"
+ },
+ {
+ "name": "SSO_BEARER_ONLY",
+ "value": "${SSO_BEARER_ONLY}"
+ },
+ {
+ "name": "SSO_SAML_KEYSTORE_SECRET",
+ "value": "${SSO_SAML_KEYSTORE_SECRET}"
+ },
+ {
+ "name": "SSO_SAML_KEYSTORE",
+ "value": "${SSO_SAML_KEYSTORE}"
+ },
+ {
+ "name": "SSO_SAML_KEYSTORE_DIR",
+ "value": "/etc/sso-saml-secret-volume"
+ },
+ {
+ "name": "SSO_SAML_CERTIFICATE_NAME",
+ "value": "${SSO_SAML_CERTIFICATE_NAME}"
+ },
+ {
+ "name": "SSO_SAML_KEYSTORE_PASSWORD",
+ "value": "${SSO_SAML_KEYSTORE_PASSWORD}"
+ },
+ {
+ "name": "SSO_SECRET",
+ "value": "${SSO_SECRET}"
+ },
+ {
+ "name": "SSO_ENABLE_CORS",
+ "value": "${SSO_ENABLE_CORS}"
+ },
+ {
+ "name": "SSO_SAML_LOGOUT_PAGE",
+ "value": "${SSO_SAML_LOGOUT_PAGE}"
+ },
+ {
+ "name": "SSO_DISABLE_SSL_CERTIFICATE_VALIDATION",
+ "value": "${SSO_DISABLE_SSL_CERTIFICATE_VALIDATION}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE",
+ "value": "${SSO_TRUSTSTORE}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_DIR",
+ "value": "/etc/sso-secret-volume"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "${SSO_TRUSTSTORE_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "sso-saml-keystore-volume",
+ "secret": {
+ "secretName": "${SSO_SAML_KEYSTORE_SECRET}"
+ }
+ },
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ },
+ {
+ "name": "sso-truststore-volume",
+ "secret": {
+ "secretName": "${SSO_TRUSTSTORE_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-basic-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-basic-s2i.json
new file mode 100644
index 000000000..413a6de87
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-basic-s2i.json
@@ -0,0 +1,284 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS applications built using S2I.",
+ "tags": "tomcat,tomcat7,java,jboss,xpaas",
+ "version": "1.2.0"
+ },
+ "name": "jws30-tomcat7-basic-s2i"
+ },
+ "labels": {
+ "template": "jws30-tomcat7-basic-s2i",
+ "xpaas": "1.2.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "tomcat-websocket-chat",
+ "required": false
+ },
+ {
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat7-openshift:1.2"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-https-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-https-s2i.json
new file mode 100644
index 000000000..610ea9441
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-https-s2i.json
@@ -0,0 +1,398 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS applications built using S2I.",
+ "tags": "tomcat,tomcat7,java,jboss,xpaas",
+ "version": "1.2.0"
+ },
+ "name": "jws30-tomcat7-https-s2i"
+ },
+ "labels": {
+ "template": "jws30-tomcat7-https-s2i",
+ "xpaas": "1.2.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "tomcat-websocket-chat",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat7-openshift:1.2"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json
new file mode 100644
index 000000000..6ef9d6e4c
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json
@@ -0,0 +1,654 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS MongoDB applications with persistent storage built using S2I.",
+ "tags": "tomcat,tomcat7,mongodb,java,database,jboss,xpaas",
+ "version": "1.2.0"
+ },
+ "name": "jws30-tomcat7-mongodb-persistent-s2i"
+ },
+ "labels": {
+ "template": "jws30-tomcat7-mongodb-persistent-s2i",
+ "xpaas": "1.2.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-mongodb",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
+ "name": "DB_JNDI",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Disable data file preallocation.",
+ "name": "MONGODB_NOPREALLOC",
+ "required": false
+ },
+ {
+ "description": "Set MongoDB to use a smaller default data file size.",
+ "name": "MONGODB_SMALLFILES",
+ "required": false
+ },
+ {
+ "description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
+ "name": "MONGODB_QUIET",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database admin password",
+ "name": "DB_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 27017,
+ "targetPort": 27017
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat7-openshift:1.2"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mongodb=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mongodb"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mongodb:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "image": "mongodb",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 27017,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/mongodb/data",
+ "name": "${APPLICATION_NAME}-mongodb-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "MONGODB_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_NOPREALLOC",
+ "value": "${MONGODB_NOPREALLOC}"
+ },
+ {
+ "name": "MONGODB_SMALLFILES",
+ "value": "${MONGODB_SMALLFILES}"
+ },
+ {
+ "name": "MONGODB_QUIET",
+ "value": "${MONGODB_QUIET}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-mongodb-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-mongodb-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-mongodb-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-mongodb-s2i.json
new file mode 100644
index 000000000..9b48f8ae7
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-mongodb-s2i.json
@@ -0,0 +1,614 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS MongoDB applications built using S2I.",
+ "tags": "tomcat,tomcat7,mongodb,java,database,jboss,xpaas",
+ "version": "1.2.0"
+ },
+ "name": "jws30-tomcat7-mongodb-s2i"
+ },
+ "labels": {
+ "template": "jws30-tomcat7-mongodb-s2i",
+ "xpaas": "1.2.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-mongodb",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
+ "name": "DB_JNDI",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Disable data file preallocation.",
+ "name": "MONGODB_NOPREALLOC",
+ "required": false
+ },
+ {
+ "description": "Set MongoDB to use a smaller default data file size.",
+ "name": "MONGODB_SMALLFILES",
+ "required": false
+ },
+ {
+ "description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
+ "name": "MONGODB_QUIET",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database admin password",
+ "name": "DB_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 27017,
+ "targetPort": 27017
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat7-openshift:1.2"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mongodb=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mongodb"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mongodb:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "image": "mongodb",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 27017,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MONGODB_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_NOPREALLOC",
+ "value": "${MONGODB_NOPREALLOC}"
+ },
+ {
+ "name": "MONGODB_SMALLFILES",
+ "value": "${MONGODB_SMALLFILES}"
+ },
+ {
+ "name": "MONGODB_QUIET",
+ "value": "${MONGODB_QUIET}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json
new file mode 100644
index 000000000..30af703ce
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json
@@ -0,0 +1,656 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS MySQL applications with persistent storage built using S2I.",
+ "tags": "tomcat,tomcat7,mysql,java,database,jboss,xpaas",
+ "version": "1.2.0"
+ },
+ "name": "jws30-tomcat7-mysql-persistent-s2i"
+ },
+ "labels": {
+ "template": "jws30-tomcat7-mysql-persistent-s2i",
+ "xpaas": "1.2.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/mysqlDS",
+ "name": "DB_JNDI",
+ "value": "jboss/datasources/defaultDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat7-openshift:1.2"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/mysql/data",
+ "name": "${APPLICATION_NAME}-mysql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-mysql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-mysql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
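(Usage sketch, not part of the diff above: once a template file such as jws30-tomcat7-mysql-persistent-s2i.json is registered in a project, it can be instantiated with the oc client. The parameter names used here come from the template itself; the project name "myproject", the local file path, and the sample parameter values are illustrative assumptions only, and flag spellings assume a reasonably recent oc client.)

  # Register the template, then process it into runnable objects (illustrative values only).
  oc create -f jws30-tomcat7-mysql-persistent-s2i.json -n myproject
  oc process jws30-tomcat7-mysql-persistent-s2i -n myproject \
      -p APPLICATION_NAME=jws-app -p DB_DATABASE=todolist -p VOLUME_CAPACITY=512Mi \
      | oc create -f - -n myproject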
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-mysql-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-mysql-s2i.json
new file mode 100644
index 000000000..c2843af63
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-mysql-s2i.json
@@ -0,0 +1,616 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS MySQL applications built using S2I.",
+ "tags": "tomcat,tomcat7,mysql,java,database,jboss,xpaas",
+ "version": "1.2.0"
+ },
+ "name": "jws30-tomcat7-mysql-s2i"
+ },
+ "labels": {
+ "template": "jws30-tomcat7-mysql-s2i",
+ "xpaas": "1.2.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/mysqlDS",
+ "name": "DB_JNDI",
+ "value": "jboss/datasources/defaultDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat7-openshift:1.2"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json
new file mode 100644
index 000000000..b8372f374
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json
@@ -0,0 +1,633 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS PostgreSQL applications with persistent storage built using S2I.",
+ "tags": "tomcat,tomcat7,postgresql,java,database,jboss,xpaas",
+ "version": "1.3.2"
+ },
+ "name": "jws30-tomcat7-postgresql-persistent-s2i"
+ },
+ "labels": {
+ "template": "jws30-tomcat7-postgresql-persistent-s2i",
+ "xpaas": "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/postgresqlDS",
+ "name": "DB_JNDI",
+ "value": "jboss/datasources/defaultDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat7-openshift:1.2"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/pgsql/data",
+ "name": "${APPLICATION_NAME}-postgresql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_PREPARED_TRANSACTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-postgresql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-postgresql-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-postgresql-s2i.json
new file mode 100644
index 000000000..cd5bb9fa4
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat7-postgresql-s2i.json
@@ -0,0 +1,593 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS PostgreSQL applications built using S2I.",
+ "tags": "tomcat,tomcat7,postgresql,java,database,jboss,xpaas",
+ "version": "1.3.2"
+ },
+ "name": "jws30-tomcat7-postgresql-s2i"
+ },
+ "labels": {
+ "template": "jws30-tomcat7-postgresql-s2i",
+ "xpaas": "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/postgresqlDS",
+ "name": "DB_JNDI",
+ "value": "jboss/datasources/defaultDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat7-openshift:1.2"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_PREPARED_TRANSACTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
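(Related setup sketch, also not part of the diff: the HTTPS-enabled JWS templates above run their application pods under the jws-service-account service account and mount a secret whose default name and key names match the JWS_HTTPS_SECRET, JWS_HTTPS_CERTIFICATE and JWS_HTTPS_CERTIFICATE_KEY parameters. The commands below are an assumed prerequisite step and expect a server.crt/server.key pair to already exist on disk.)

  # Assumed prerequisites for the HTTPS template variants (names taken from the template defaults).
  oc create serviceaccount jws-service-account
  oc create secret generic jws-app-secret --from-file=server.crt --from-file=server.key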
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-basic-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-basic-s2i.json
new file mode 100644
index 000000000..cb1e49d29
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-basic-s2i.json
@@ -0,0 +1,284 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS applications built using S2I.",
+ "tags": "tomcat,tomcat8,java,jboss,xpaas",
+ "version": "1.2.0"
+ },
+ "name": "jws30-tomcat8-basic-s2i"
+ },
+ "labels": {
+ "template": "jws30-tomcat8-basic-s2i",
+ "xpaas": "1.2.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "tomcat-websocket-chat",
+ "required": false
+ },
+ {
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat8-openshift:1.2"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-https-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-https-s2i.json
new file mode 100644
index 000000000..21d5662c7
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-https-s2i.json
@@ -0,0 +1,398 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS applications built using S2I.",
+ "tags": "tomcat,tomcat8,java,jboss,xpaas",
+ "version": "1.2.0"
+ },
+ "name": "jws30-tomcat8-https-s2i"
+ },
+ "labels": {
+ "template": "jws30-tomcat8-https-s2i",
+ "xpaas": "1.2.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "tomcat-websocket-chat",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat8-openshift:1.2"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json
new file mode 100644
index 000000000..34657d826
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json
@@ -0,0 +1,654 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS MongoDB applications with persistent storage built using S2I.",
+ "tags": "tomcat,tomcat8,mongodb,java,database,jboss,xpaas",
+ "version": "1.2.0"
+ },
+ "name": "jws30-tomcat8-mongodb-persistent-s2i"
+ },
+ "labels": {
+ "template": "jws30-tomcat8-mongodb-persistent-s2i",
+ "xpaas": "1.2.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-mongodb",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
+ "name": "DB_JNDI",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Disable data file preallocation.",
+ "name": "MONGODB_NOPREALLOC",
+ "required": false
+ },
+ {
+ "description": "Set MongoDB to use a smaller default data file size.",
+ "name": "MONGODB_SMALLFILES",
+ "required": false
+ },
+ {
+ "description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
+ "name": "MONGODB_QUIET",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database admin password",
+ "name": "DB_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 27017,
+ "targetPort": 27017
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat8-openshift:1.2"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mongodb=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mongodb"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mongodb:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "image": "mongodb",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 27017,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/mongodb/data",
+ "name": "${APPLICATION_NAME}-mongodb-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "MONGODB_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_NOPREALLOC",
+ "value": "${MONGODB_NOPREALLOC}"
+ },
+ {
+ "name": "MONGODB_SMALLFILES",
+ "value": "${MONGODB_SMALLFILES}"
+ },
+ {
+ "name": "MONGODB_QUIET",
+ "value": "${MONGODB_QUIET}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-mongodb-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-mongodb-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-mongodb-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-mongodb-s2i.json
new file mode 100644
index 000000000..974cfaddb
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-mongodb-s2i.json
@@ -0,0 +1,614 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS MongoDB applications built using S2I.",
+ "tags": "tomcat,tomcat8,mongodb,java,database,jboss,xpaas",
+ "version": "1.2.0"
+ },
+ "name": "jws30-tomcat8-mongodb-s2i"
+ },
+ "labels": {
+ "template": "jws30-tomcat8-mongodb-s2i",
+ "xpaas": "1.2.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-mongodb",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
+ "name": "DB_JNDI",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Disable data file preallocation.",
+ "name": "MONGODB_NOPREALLOC",
+ "required": false
+ },
+ {
+ "description": "Set MongoDB to use a smaller default data file size.",
+ "name": "MONGODB_SMALLFILES",
+ "required": false
+ },
+ {
+ "description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
+ "name": "MONGODB_QUIET",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database admin password",
+ "name": "DB_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 27017,
+ "targetPort": 27017
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat8-openshift:1.2"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mongodb=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mongodb"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mongodb:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "image": "mongodb",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 27017,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MONGODB_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_NOPREALLOC",
+ "value": "${MONGODB_NOPREALLOC}"
+ },
+ {
+ "name": "MONGODB_SMALLFILES",
+ "value": "${MONGODB_SMALLFILES}"
+ },
+ {
+ "name": "MONGODB_QUIET",
+ "value": "${MONGODB_QUIET}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json
new file mode 100644
index 000000000..7a8231cc5
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json
@@ -0,0 +1,656 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS MySQL applications with persistent storage built using S2I.",
+ "tags": "tomcat,tomcat8,mysql,java,database,jboss,xpaas",
+ "version": "1.2.0"
+ },
+ "name": "jws30-tomcat8-mysql-persistent-s2i"
+ },
+ "labels": {
+ "template": "jws30-tomcat8-mysql-persistent-s2i",
+ "xpaas": "1.2.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/mysqlDS",
+ "name": "DB_JNDI",
+ "value": "jboss/datasources/defaultDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat8-openshift:1.2"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/mysql/data",
+ "name": "${APPLICATION_NAME}-mysql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-mysql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-mysql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-mysql-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-mysql-s2i.json
new file mode 100644
index 000000000..cda21f237
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-mysql-s2i.json
@@ -0,0 +1,616 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS MySQL applications built using S2I.",
+ "tags": "tomcat,tomcat8,mysql,java,database,jboss,xpaas",
+ "version": "1.2.0"
+ },
+ "name": "jws30-tomcat8-mysql-s2i"
+ },
+ "labels": {
+ "template": "jws30-tomcat8-mysql-s2i",
+ "xpaas": "1.2.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/mysqlDS",
+ "name": "DB_JNDI",
+ "value": "jboss/datasources/defaultDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat8-openshift:1.2"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json
new file mode 100644
index 000000000..4dfc98015
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json
@@ -0,0 +1,633 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS PostgreSQL applications with persistent storage built using S2I.",
+ "tags": "tomcat,tomcat8,postgresql,java,database,jboss,xpaas",
+ "version": "1.3.2"
+ },
+ "name": "jws30-tomcat8-postgresql-persistent-s2i"
+ },
+ "labels": {
+ "template": "jws30-tomcat8-postgresql-persistent-s2i",
+ "xpaas": "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/postgresqlDS",
+ "name": "DB_JNDI",
+ "value": "jboss/datasources/defaultDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat8-openshift:1.2"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/pgsql/data",
+ "name": "${APPLICATION_NAME}-postgresql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_PREPARED_TRANSACTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-postgresql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-postgresql-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-postgresql-s2i.json
new file mode 100644
index 000000000..f6c85668c
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/jws30-tomcat8-postgresql-s2i.json
@@ -0,0 +1,591 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS PostgreSQL applications built using S2I.",
+ "tags": "tomcat,tomcat8,postgresql,java,database,jboss,xpaas",
+ "version": "1.3.2"
+ },
+ "name": "jws30-tomcat8-postgresql-s2i"
+ },
+ "labels": {
+ "template": "jws30-tomcat8-postgresql-s2i",
+ "xpaas": "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.2",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. jboss/datasources/postgresqlDS",
+ "name": "DB_JNDI",
+ "value": "jboss/datasources/defaultDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat8-openshift:1.2"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "jws-service-account",
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_PREPARED_TRANSACTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-amq-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-amq-mysql-persistent-s2i.json
new file mode 100644
index 000000000..1dea463ac
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-amq-mysql-persistent-s2i.json
@@ -0,0 +1,1079 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BPM Suite 6.3 intelligent process server AMQ and MySQL applications with persistent storage built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "processserver,amq,mysql,javaee,java,database,jboss,xpaas",
+ "version": "1.3.3"
+ },
+ "name": "processserver63-amq-mysql-persistent-s2i"
+ },
+ "labels": {
+ "template": "processserver63-amq-mysql-persistent-s2i",
+ "xpaas": "1.3.3"
+ },
+ "parameters": [
+ {
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
+ "required": false
+ },
+ {
+ "description": "The protocol to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "https",
+ "required": false
+ },
+ {
+ "description": "The port to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PORT",
+ "value": "8443",
+ "required": false
+ },
+ {
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "other",
+ "required": false
+ },
+ {
+ "description": "JNDI name of request queue for JMS.",
+ "name": "KIE_SERVER_JMS_QUEUES_REQUEST",
+ "value": "queue/KIE.SERVER.REQUEST",
+ "required": false
+ },
+ {
+ "description": "JNDI name of response queue for JMS.",
+ "name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
+ "value": "queue/KIE.SERVER.RESPONSE",
+ "required": false
+ },
+ {
+ "description": "JNDI name of executor queue for JMS.",
+ "name": "KIE_SERVER_EXECUTOR_JMS_QUEUE",
+ "value": "queue/KIE.SERVER.EXECUTOR",
+ "required": false
+ },
+ {
+ "description": "Hibernate persistence dialect.",
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "org.hibernate.dialect.MySQL5Dialect",
+ "required": false
+ },
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "processserver/library",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/ExampleDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/JmsXA",
+ "name": "MQ_JNDI",
+ "value": "java:/JmsXA",
+ "required": false
+ },
+ {
+ "description": "Split the data directory for each node in a mesh.",
+ "name": "AMQ_SPLIT",
+ "value": "false",
+ "required": false
+ },
+ {
+ "description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_QUEUES",
+ "value": "KIE.SERVER.REQUEST,KIE.SERVER.RESPONSE,KIE.SERVER.EXECUTOR",
+ "required": false
+ },
+ {
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "processserver-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-processserver63-openshift:1.3"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "processserver-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "processserver-keystore-volume",
+ "mountPath": "/etc/processserver-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "${KIE_SERVER_PROTOCOL}"
+ },
+ {
+ "name": "KIE_SERVER_PORT",
+ "value": "${KIE_SERVER_PORT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "${KIE_SERVER_DOMAIN}"
+ },
+ {
+ "name": "KIE_SERVER_JMS_QUEUES_REQUEST",
+ "value": "${KIE_SERVER_JMS_QUEUES_REQUEST}"
+ },
+ {
+ "name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
+ "value": "${KIE_SERVER_JMS_QUEUES_RESPONSE}"
+ },
+ {
+ "name": "KIE_SERVER_EXECUTOR_JMS_QUEUE",
+ "value": "${KIE_SERVER_EXECUTOR_JMS_QUEUE}"
+ },
+ {
+ "name": "MQ_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-amq=MQ"
+ },
+ {
+ "name": "MQ_JNDI",
+ "value": "${MQ_JNDI}"
+ },
+ {
+ "name": "MQ_USERNAME",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "MQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "MQ_PROTOCOL",
+ "value": "tcp"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "${KIE_SERVER_PERSISTENCE_DIALECT}"
+ },
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB,${APPLICATION_NAME}-mysql=QUARTZ"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "QUARTZ_JNDI",
+ "value": "${DB_JNDI}NotManaged"
+ },
+ {
+ "name": "QUARTZ_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "QUARTZ_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "QUARTZ_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "QUARTZ_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "QUARTZ_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "QUARTZ_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "QUARTZ_JTA",
+ "value": "false"
+ },
+ {
+ "name": "QUARTZ_NONXA",
+ "value": "true"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/processserver-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "processserver-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/mysql/data",
+ "name": "${APPLICATION_NAME}-mysql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-mysql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-mysql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-62:1.3"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-62",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "mountPath": "/opt/amq/data",
+ "name": "${APPLICATION_NAME}-amq-pvol"
+ }
+ ],
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp-ssl",
+ "containerPort": 5671,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp-ssl",
+ "containerPort": 61612,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp-ssl",
+ "containerPort": 61617,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_SPLIT",
+ "value": "${AMQ_SPLIT}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-amq-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-amq-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteMany"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-amq-mysql-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-amq-mysql-s2i.json
new file mode 100644
index 000000000..42264585b
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-amq-mysql-s2i.json
@@ -0,0 +1,959 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BPM Suite 6.3 intelligent process server AMQ and MySQL applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "processserver,amq,mysql,javaee,java,database,jboss,xpaas",
+ "version": "1.3.3"
+ },
+ "name": "processserver63-amq-mysql-s2i"
+ },
+ "labels": {
+ "template": "processserver63-amq-mysql-s2i",
+ "xpaas": "1.3.3"
+ },
+ "parameters": [
+ {
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
+ "required": false
+ },
+ {
+ "description": "The protocol to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "https",
+ "required": false
+ },
+ {
+ "description": "The port to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PORT",
+ "value": "8443",
+ "required": false
+ },
+ {
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "other",
+ "required": false
+ },
+ {
+ "description": "JNDI name of request queue for JMS.",
+ "name": "KIE_SERVER_JMS_QUEUES_REQUEST",
+ "value": "queue/KIE.SERVER.REQUEST",
+ "required": false
+ },
+ {
+ "description": "JNDI name of response queue for JMS.",
+ "name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
+ "value": "queue/KIE.SERVER.RESPONSE",
+ "required": false
+ },
+ {
+ "description": "JNDI name of executor queue for JMS.",
+ "name": "KIE_SERVER_EXECUTOR_JMS_QUEUE",
+ "value": "queue/KIE.SERVER.EXECUTOR",
+ "required": false
+ },
+ {
+ "description": "Hibernate persistence dialect.",
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "org.hibernate.dialect.MySQL5Dialect",
+ "required": false
+ },
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "processserver/library",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/ExampleDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/JmsXA",
+ "name": "MQ_JNDI",
+ "value": "java:/JmsXA",
+ "required": false
+ },
+ {
+ "description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_QUEUES",
+ "value": "KIE.SERVER.REQUEST,KIE.SERVER.RESPONSE,KIE.SERVER.EXECUTOR",
+ "required": false
+ },
+ {
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "processserver-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-processserver63-openshift:1.3"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "processserver-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "processserver-keystore-volume",
+ "mountPath": "/etc/processserver-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "${KIE_SERVER_PROTOCOL}"
+ },
+ {
+ "name": "KIE_SERVER_PORT",
+ "value": "${KIE_SERVER_PORT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "${KIE_SERVER_DOMAIN}"
+ },
+ {
+ "name": "KIE_SERVER_JMS_QUEUES_REQUEST",
+ "value": "${KIE_SERVER_JMS_QUEUES_REQUEST}"
+ },
+ {
+ "name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
+ "value": "${KIE_SERVER_JMS_QUEUES_RESPONSE}"
+ },
+ {
+ "name": "KIE_SERVER_EXECUTOR_JMS_QUEUE",
+ "value": "${KIE_SERVER_EXECUTOR_JMS_QUEUE}"
+ },
+ {
+ "name": "MQ_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-amq=MQ"
+ },
+ {
+ "name": "MQ_JNDI",
+ "value": "${MQ_JNDI}"
+ },
+ {
+ "name": "MQ_USERNAME",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "MQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "MQ_PROTOCOL",
+ "value": "tcp"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "${KIE_SERVER_PERSISTENCE_DIALECT}"
+ },
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/processserver-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "processserver-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-62:1.3"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-62",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp-ssl",
+ "containerPort": 5671,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp-ssl",
+ "containerPort": 61612,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp-ssl",
+ "containerPort": 61617,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-amq-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-amq-postgresql-persistent-s2i.json
new file mode 100644
index 000000000..f6d0c99ed
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-amq-postgresql-persistent-s2i.json
@@ -0,0 +1,1052 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BPM Suite 6.3 intelligent process server AMQ and PostgreSQL applications with persistent storage built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "processserver,amq,postgresql,javaee,java,database,jboss,xpaas",
+ "version": "1.3.3"
+ },
+ "name": "processserver63-amq-postgresql-persistent-s2i"
+ },
+ "labels": {
+ "template": "processserver63-amq-postgresql-persistent-s2i",
+ "xpaas": "1.3.3"
+ },
+ "parameters": [
+ {
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
+ "required": false
+ },
+ {
+ "description": "The protocol to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "https",
+ "required": false
+ },
+ {
+ "description": "The port to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PORT",
+ "value": "8443",
+ "required": false
+ },
+ {
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "other",
+ "required": false
+ },
+ {
+ "description": "JNDI name of request queue for JMS.",
+ "name": "KIE_SERVER_JMS_QUEUES_REQUEST",
+ "value": "queue/KIE.SERVER.REQUEST",
+ "required": false
+ },
+ {
+ "description": "JNDI name of response queue for JMS.",
+ "name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
+ "value": "queue/KIE.SERVER.RESPONSE",
+ "required": false
+ },
+ {
+ "description": "JNDI name of executor queue for JMS.",
+ "name": "KIE_SERVER_EXECUTOR_JMS_QUEUE",
+ "value": "queue/KIE.SERVER.EXECUTOR",
+ "required": false
+ },
+ {
+ "description": "Hibernate persistence dialect.",
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "org.hibernate.dialect.PostgreSQL82Dialect",
+ "required": false
+ },
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "processserver/library",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/ExampleDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/JmsXA",
+ "name": "MQ_JNDI",
+ "value": "java:/JmsXA",
+ "required": false
+ },
+ {
+ "description": "Split the data directory for each node in a mesh.",
+ "name": "AMQ_SPLIT",
+ "value": "false",
+ "required": false
+ },
+ {
+ "description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_QUEUES",
+ "value": "KIE.SERVER.REQUEST,KIE.SERVER.RESPONSE,KIE.SERVER.EXECUTOR",
+ "required": false
+ },
+ {
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "processserver-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-processserver63-openshift:1.3"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "processserver-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "processserver-keystore-volume",
+ "mountPath": "/etc/processserver-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "${KIE_SERVER_PROTOCOL}"
+ },
+ {
+ "name": "KIE_SERVER_PORT",
+ "value": "${KIE_SERVER_PORT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "${KIE_SERVER_DOMAIN}"
+ },
+ {
+ "name": "KIE_SERVER_JMS_QUEUES_REQUEST",
+ "value": "${KIE_SERVER_JMS_QUEUES_REQUEST}"
+ },
+ {
+ "name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
+ "value": "${KIE_SERVER_JMS_QUEUES_RESPONSE}"
+ },
+ {
+ "name": "KIE_SERVER_EXECUTOR_JMS_QUEUE",
+ "value": "${KIE_SERVER_EXECUTOR_JMS_QUEUE}"
+ },
+ {
+ "name": "MQ_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-amq=MQ"
+ },
+ {
+ "name": "MQ_JNDI",
+ "value": "${MQ_JNDI}"
+ },
+ {
+ "name": "MQ_USERNAME",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "MQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "MQ_PROTOCOL",
+ "value": "tcp"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "${KIE_SERVER_PERSISTENCE_DIALECT}"
+ },
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB,${APPLICATION_NAME}-postgresql=QUARTZ"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "QUARTZ_JNDI",
+ "value": "${DB_JNDI}NotManaged"
+ },
+ {
+ "name": "QUARTZ_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "QUARTZ_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "QUARTZ_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "QUARTZ_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "QUARTZ_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "QUARTZ_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "QUARTZ_JTA",
+ "value": "false"
+ },
+ {
+ "name": "QUARTZ_NONXA",
+ "value": "true"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/processserver-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "processserver-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/pgsql/data",
+ "name": "${APPLICATION_NAME}-postgresql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-postgresql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-62:1.3"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-62",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "mountPath": "/opt/amq/data",
+ "name": "${APPLICATION_NAME}-amq-pvol"
+ }
+ ],
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp-ssl",
+ "containerPort": 5671,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp-ssl",
+ "containerPort": 61612,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp-ssl",
+ "containerPort": 61617,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_SPLIT",
+ "value": "${AMQ_SPLIT}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-amq-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-amq-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteMany"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-amq-postgresql-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-amq-postgresql-s2i.json
new file mode 100644
index 000000000..41c726cf0
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-amq-postgresql-s2i.json
@@ -0,0 +1,932 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BPM Suite 6.3 intelligent process server AMQ and PostgreSQL applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "processserver,amq,postgresql,javaee,java,database,jboss,xpaas",
+ "version": "1.3.3"
+ },
+ "name": "processserver63-amq-postgresql-s2i"
+ },
+ "labels": {
+ "template": "processserver63-amq-postgresql-s2i",
+ "xpaas": "1.3.3"
+ },
+ "parameters": [
+ {
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
+ "required": false
+ },
+ {
+ "description": "The protocol to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "https",
+ "required": false
+ },
+ {
+ "description": "The port to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PORT",
+ "value": "8443",
+ "required": false
+ },
+ {
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "other",
+ "required": false
+ },
+ {
+ "description": "JNDI name of request queue for JMS.",
+ "name": "KIE_SERVER_JMS_QUEUES_REQUEST",
+ "value": "queue/KIE.SERVER.REQUEST",
+ "required": false
+ },
+ {
+ "description": "JNDI name of response queue for JMS.",
+ "name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
+ "value": "queue/KIE.SERVER.RESPONSE",
+ "required": false
+ },
+ {
+ "description": "JNDI name of executor queue for JMS.",
+ "name": "KIE_SERVER_EXECUTOR_JMS_QUEUE",
+ "value": "queue/KIE.SERVER.EXECUTOR",
+ "required": false
+ },
+ {
+ "description": "Hibernate persistence dialect.",
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "org.hibernate.dialect.PostgreSQL82Dialect",
+ "required": false
+ },
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "processserver/library",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/ExampleDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/JmsXA",
+ "name": "MQ_JNDI",
+ "value": "java:/JmsXA",
+ "required": false
+ },
+ {
+ "description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_QUEUES",
+ "value": "KIE.SERVER.REQUEST,KIE.SERVER.RESPONSE,KIE.SERVER.EXECUTOR",
+ "required": false
+ },
+ {
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "processserver-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-processserver63-openshift:1.3"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "processserver-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "processserver-keystore-volume",
+ "mountPath": "/etc/processserver-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "${KIE_SERVER_PROTOCOL}"
+ },
+ {
+ "name": "KIE_SERVER_PORT",
+ "value": "${KIE_SERVER_PORT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "${KIE_SERVER_DOMAIN}"
+ },
+ {
+ "name": "KIE_SERVER_JMS_QUEUES_REQUEST",
+ "value": "${KIE_SERVER_JMS_QUEUES_REQUEST}"
+ },
+ {
+ "name": "KIE_SERVER_JMS_QUEUES_RESPONSE",
+ "value": "${KIE_SERVER_JMS_QUEUES_RESPONSE}"
+ },
+ {
+ "name": "KIE_SERVER_EXECUTOR_JMS_QUEUE",
+ "value": "${KIE_SERVER_EXECUTOR_JMS_QUEUE}"
+ },
+ {
+ "name": "MQ_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-amq=MQ"
+ },
+ {
+ "name": "MQ_JNDI",
+ "value": "${MQ_JNDI}"
+ },
+ {
+ "name": "MQ_USERNAME",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "MQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "MQ_PROTOCOL",
+ "value": "tcp"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "${KIE_SERVER_PERSISTENCE_DIALECT}"
+ },
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/processserver-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "processserver-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-62:1.3"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-62",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp-ssl",
+ "containerPort": 5671,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp-ssl",
+ "containerPort": 61612,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp-ssl",
+ "containerPort": 61617,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
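The template that ends here wires the process server container to its companion PostgreSQL and A-MQ deployments purely through environment variables: DB_SERVICE_PREFIX_MAPPING and MQ_SERVICE_PREFIX_MAPPING each name a Service and a prefix, and the image's launch scripts are expected to combine the prefix-named settings (DB_USERNAME, MQ_PASSWORD, and so on) with the host/port variables Kubernetes injects for that Service. The sketch below only illustrates that convention; the helper name and exact lookup rules are assumptions for the illustration, not the image's actual startup code.

    import os

    def resolve_prefix_mapping(mapping):
        # Illustrative only: pairs the prefix-named settings from the template
        # (DB_USERNAME, DB_PASSWORD, ...) with the *_SERVICE_HOST/_PORT variables
        # Kubernetes injects for the mapped Service. The real logic lives in the
        # image's launch scripts; the names here are assumptions for the sketch.
        resolved = []
        for pair in filter(None, mapping.split(",")):
            service, prefix = pair.split("=")
            svc = service.upper().replace("-", "_")   # kie-app-postgresql -> KIE_APP_POSTGRESQL
            resolved.append({
                "prefix": prefix,
                "jndi": os.environ.get(prefix + "_JNDI"),
                "username": os.environ.get(prefix + "_USERNAME"),
                "password": os.environ.get(prefix + "_PASSWORD"),
                "host": os.environ.get(svc + "_SERVICE_HOST"),
                "port": os.environ.get(svc + "_SERVICE_PORT"),
            })
        return resolved

    if __name__ == "__main__":
        print(resolve_prefix_mapping(os.environ.get("DB_SERVICE_PREFIX_MAPPING", "")))

Because the wiring is entirely environment-driven, changing APPLICATION_NAME renames the Services and the mappings together, so the references stay consistent.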
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-basic-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-basic-s2i.json
new file mode 100644
index 000000000..170c919cb
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-basic-s2i.json
@@ -0,0 +1,345 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BPM Suite 6.3 intelligent process server applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "processserver,javaee,java,jboss,xpaas",
+ "version": "1.3.3"
+ },
+ "name": "processserver63-basic-s2i"
+ },
+ "labels": {
+ "template": "processserver63-basic-s2i",
+ "xpaas": "1.3.3"
+ },
+ "parameters": [
+ {
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
+ "required": false
+ },
+ {
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "Hibernate persistence dialect.",
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "org.hibernate.dialect.H2Dialect",
+ "required": false
+ },
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "processserver/library",
+ "required": false
+ },
+ {
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-processserver63-openshift:1.3"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
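Several parameters in these templates (KIE_SERVER_PASSWORD, HORNETQ_CLUSTER_PASSWORD, the webhook secrets) carry no fixed value; they declare "generate": "expression" with a "from" pattern, and the template processor fills in a random value matching that pattern when the template is processed. The snippet below is a deliberately simplified re-implementation of that expansion, included only to make the pattern syntax concrete; it handles just the character classes used in these files and is not the processor's actual algorithm.

    import random
    import re
    import string

    # Simplified stand-in for OpenShift's "generate": "expression" expansion:
    # each [class]{n} group becomes n random characters from that class, and
    # everything else is copied literally. Illustrative only.
    CLASSES = {
        "a-zA-Z": string.ascii_letters,
        "0-9": string.digits,
        "a-zA-Z0-9": string.ascii_letters + string.digits,
    }

    def expand(expr):
        out, pos = [], 0
        for m in re.finditer(r"\[([^\]]+)\]\{(\d+)\}", expr):
            out.append(expr[pos:m.start()])
            pool = CLASSES[m.group(1)]
            out.append("".join(random.choice(pool) for _ in range(int(m.group(2)))))
            pos = m.end()
        out.append(expr[pos:])
        return "".join(out)

    print(expand("[a-zA-Z]{6}[0-9]{1}!"))   # e.g. QwErTy7!
    print(expand("[a-zA-Z0-9]{8}"))         # e.g. f3G9kq2Z

Run against "[a-zA-Z]{6}[0-9]{1}!" this yields something like "QwErTy7!", which meets the rule stated in the KIE_SERVER_PASSWORD description: eight characters with at least one letter, one digit, and one non-alphanumeric symbol.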
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-mysql-persistent-s2i.json
new file mode 100644
index 000000000..89d0db1a6
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-mysql-persistent-s2i.json
@@ -0,0 +1,792 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BPM Suite 6.3 intelligent process server MySQL applications with persistent storage built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "processserver,mysql,javaee,java,database,jboss,xpaas",
+ "version": "1.3.3"
+ },
+ "name": "processserver63-mysql-persistent-s2i"
+ },
+ "labels": {
+ "template": "processserver63-mysql-persistent-s2i",
+ "xpaas": "1.3.3"
+ },
+ "parameters": [
+ {
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
+ "required": false
+ },
+ {
+ "description": "The protocol to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "https",
+ "required": false
+ },
+ {
+ "description": "The port to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PORT",
+ "value": "8443",
+ "required": false
+ },
+ {
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "other",
+ "required": false
+ },
+ {
+ "description": "Hibernate persistence dialect.",
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "org.hibernate.dialect.MySQL5Dialect",
+ "required": false
+ },
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "processserver/library",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/ExampleDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "processserver-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-processserver63-openshift:1.3"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "processserver-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "processserver-keystore-volume",
+ "mountPath": "/etc/processserver-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "${KIE_SERVER_PROTOCOL}"
+ },
+ {
+ "name": "KIE_SERVER_PORT",
+ "value": "${KIE_SERVER_PORT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "${KIE_SERVER_DOMAIN}"
+ },
+ {
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "${KIE_SERVER_PERSISTENCE_DIALECT}"
+ },
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB,${APPLICATION_NAME}-mysql=QUARTZ"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "QUARTZ_JNDI",
+ "value": "${DB_JNDI}NotManaged"
+ },
+ {
+ "name": "QUARTZ_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "QUARTZ_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "QUARTZ_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "QUARTZ_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "QUARTZ_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "QUARTZ_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "QUARTZ_JTA",
+ "value": "false"
+ },
+ {
+ "name": "QUARTZ_NONXA",
+ "value": "true"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/processserver-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "processserver-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/mysql/data",
+ "name": "${APPLICATION_NAME}-mysql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-mysql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-mysql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
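Unlike the non-persistent variants, the template above maps the same MySQL Service twice in DB_SERVICE_PREFIX_MAPPING (once as DB, once as QUARTZ) and points QUARTZ_JNDI at "${DB_JNDI}NotManaged" with QUARTZ_JTA=false and QUARTZ_NONXA=true, giving the Quartz scheduler its own non-JTA datasource against the same database. A quick way to eyeball that wiring is to load the template and list the QUARTZ_* variables per container; the snippet below does only that and assumes it is run from the directory containing the file.

    import json

    # Lists, per DeploymentConfig container, which env vars use the QUARTZ_ prefix,
    # so the separate non-JTA Quartz datasource wiring is easy to inspect.
    with open("processserver63-mysql-persistent-s2i.json") as f:
        template = json.load(f)

    for obj in template["objects"]:
        if obj["kind"] != "DeploymentConfig":
            continue
        for container in obj["spec"]["template"]["spec"]["containers"]:
            quartz = [e["name"] for e in container.get("env", [])
                      if e["name"].startswith("QUARTZ_")]
            print(container["name"], "->", quartz or "no QUARTZ_* variables")

Only the process server container reports QUARTZ_* variables; the MySQL container is plain database configuration.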
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-mysql-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-mysql-s2i.json
new file mode 100644
index 000000000..26cab29f8
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-mysql-s2i.json
@@ -0,0 +1,716 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BPM Suite 6.3 intelligent process server MySQL applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "processserver,mysql,javaee,java,database,jboss,xpaas",
+ "version": "1.3.3"
+ },
+ "name": "processserver63-mysql-s2i"
+ },
+ "labels": {
+ "template": "processserver63-mysql-s2i",
+ "xpaas": "1.3.3"
+ },
+ "parameters": [
+ {
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
+ "required": false
+ },
+ {
+ "description": "The protocol to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "https",
+ "required": false
+ },
+ {
+ "description": "The port to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PORT",
+ "value": "8443",
+ "required": false
+ },
+ {
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "other",
+ "required": false
+ },
+ {
+ "description": "Hibernate persistence dialect.",
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "org.hibernate.dialect.MySQL5Dialect",
+ "required": false
+ },
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "processserver/library",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/ExampleDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "processserver-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-processserver63-openshift:1.3"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "processserver-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "processserver-keystore-volume",
+ "mountPath": "/etc/processserver-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "${KIE_SERVER_PROTOCOL}"
+ },
+ {
+ "name": "KIE_SERVER_PORT",
+ "value": "${KIE_SERVER_PORT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "${KIE_SERVER_DOMAIN}"
+ },
+ {
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "${KIE_SERVER_PERSISTENCE_DIALECT}"
+ },
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/processserver-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "processserver-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
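All of the objects in these files reference parameters as ${NAME} tokens, which are resolved when the template is processed (for example by oc process or when instantiated from the web console). The snippet below is a deliberately simplified stand-in for that step, assuming plain string substitution of the declared defaults plus caller-supplied overrides; it ignores generated expressions, template labels, and validation, so treat it as an illustration of the data flow rather than a replacement for the real processor.

    import json
    import re

    def process_template(path, overrides):
        # Simplified stand-in for template processing: substitutes ${NAME} tokens
        # using the parameter defaults declared in the file, overridden by
        # caller-supplied values. Generated expressions and labels are ignored.
        with open(path) as f:
            raw = f.read()
        params = {p["name"]: p.get("value", "") for p in json.loads(raw)["parameters"]}
        params.update(overrides)
        rendered = re.sub(r"\$\{(\w+)\}",
                          lambda m: params.get(m.group(1), m.group(0)), raw)
        return json.loads(rendered)["objects"]

    objects = process_template("processserver63-mysql-s2i.json",
                               {"APPLICATION_NAME": "kie-app"})
    print([o["kind"] for o in objects])

Printing the kinds confirms the template expands to the Services, Routes, ImageStream, BuildConfig, and DeploymentConfigs listed above.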
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-postgresql-persistent-s2i.json
new file mode 100644
index 000000000..32a512829
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-postgresql-persistent-s2i.json
@@ -0,0 +1,765 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BPM Suite 6.3 intelligent process server PostgreSQL applications with persistent storage built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "processserver,postgresql,javaee,java,database,jboss,xpaas",
+ "version": "1.3.3"
+ },
+ "name": "processserver63-postgresql-persistent-s2i"
+ },
+ "labels": {
+ "template": "processserver63-postgresql-persistent-s2i",
+ "xpaas": "1.3.3"
+ },
+ "parameters": [
+ {
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
+ "required": false
+ },
+ {
+ "description": "The protocol to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "https",
+ "required": false
+ },
+ {
+ "description": "The port to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PORT",
+ "value": "8443",
+ "required": false
+ },
+ {
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "other",
+ "required": false
+ },
+ {
+ "description": "Hibernate persistence dialect.",
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "org.hibernate.dialect.PostgreSQL82Dialect",
+ "required": false
+ },
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "processserver/library",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/ExampleDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "processserver-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-processserver63-openshift:1.3"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "processserver-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "processserver-keystore-volume",
+ "mountPath": "/etc/processserver-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "${KIE_SERVER_PROTOCOL}"
+ },
+ {
+ "name": "KIE_SERVER_PORT",
+ "value": "${KIE_SERVER_PORT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "${KIE_SERVER_DOMAIN}"
+ },
+ {
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "${KIE_SERVER_PERSISTENCE_DIALECT}"
+ },
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB,${APPLICATION_NAME}-postgresql=QUARTZ"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "QUARTZ_JNDI",
+ "value": "${DB_JNDI}NotManaged"
+ },
+ {
+ "name": "QUARTZ_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "QUARTZ_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "QUARTZ_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "QUARTZ_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "QUARTZ_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "QUARTZ_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "QUARTZ_JTA",
+ "value": "false"
+ },
+ {
+ "name": "QUARTZ_NONXA",
+ "value": "true"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/processserver-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "processserver-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/pgsql/data",
+ "name": "${APPLICATION_NAME}-postgresql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-postgresql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
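Note: the persistent process server template above wires the KIE container to PostgreSQL (and a non-XA Quartz datasource) through ${PARAM} placeholders that the platform substitutes when the template is processed. As a rough local illustration only, not the platform's own implementation, the substitution can be approximated with Python's string.Template, which happens to share the ${NAME} syntax; the file name and overrides below are assumptions.

import json
from string import Template

def process_template(path, overrides=None):
    """Fill ${PARAM} references in a template's objects from parameter defaults."""
    with open(path) as f:
        tmpl = json.load(f)
    # Start from each parameter's default "value"; parameters that use
    # "generate": "expression" default to an empty string here, whereas the
    # real platform generates a value from their "from" expression.
    params = {p["name"]: p.get("value", "") for p in tmpl.get("parameters", [])}
    params.update(overrides or {})
    rendered = Template(json.dumps(tmpl["objects"])).safe_substitute(params)
    return json.loads(rendered)

# Illustrative call (file name and parameter values are assumptions):
# objects = process_template("processserver63-postgresql-persistent-s2i.json",
#                            {"APPLICATION_NAME": "kie-app", "VOLUME_CAPACITY": "1Gi"})
# print([o["kind"] for o in objects])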
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-postgresql-s2i.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-postgresql-s2i.json
new file mode 100644
index 000000000..55e2199bb
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/processserver63-postgresql-s2i.json
@@ -0,0 +1,689 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for Red Hat JBoss BPM Suite 6.3 intelligent process server PostgreSQL applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "processserver,postgresql,javaee,java,database,jboss,xpaas",
+ "version": "1.3.3"
+ },
+ "name": "processserver63-postgresql-s2i"
+ },
+ "labels": {
+ "template": "processserver63-postgresql-s2i",
+ "xpaas": "1.3.3"
+ },
+ "parameters": [
+ {
+ "description": "The KIE Container deployment configuration in format: containerId=groupId:artifactId:version|c2=g2:a2:v2",
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "processserver-library=org.openshift.quickstarts:processserver-library:1.3.0.Final",
+ "required": false
+ },
+ {
+ "description": "The protocol to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "https",
+ "required": false
+ },
+ {
+ "description": "The port to access the KIE Server REST interface.",
+ "name": "KIE_SERVER_PORT",
+ "value": "8443",
+ "required": false
+ },
+ {
+ "description": "The user name to access the KIE Server REST or JMS interface.",
+ "name": "KIE_SERVER_USER",
+ "value": "kieserver",
+ "required": false
+ },
+ {
+ "description": "The password to access the KIE Server REST or JMS interface. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s).",
+ "name": "KIE_SERVER_PASSWORD",
+ "from": "[a-zA-Z]{6}[0-9]{1}!",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "JAAS LoginContext domain that shall be used to authenticate users when using JMS.",
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "other",
+ "required": false
+ },
+ {
+ "description": "Hibernate persistence dialect.",
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "org.hibernate.dialect.PostgreSQL82Dialect",
+ "required": false
+ },
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "kie-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "processserver/library",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/ExampleDS",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/ExampleDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "processserver-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-processserver63-openshift:1.3"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "processserver-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "processserver-keystore-volume",
+ "mountPath": "/etc/processserver-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "KIE_CONTAINER_DEPLOYMENT",
+ "value": "${KIE_CONTAINER_DEPLOYMENT}"
+ },
+ {
+ "name": "KIE_SERVER_PROTOCOL",
+ "value": "${KIE_SERVER_PROTOCOL}"
+ },
+ {
+ "name": "KIE_SERVER_PORT",
+ "value": "${KIE_SERVER_PORT}"
+ },
+ {
+ "name": "KIE_SERVER_USER",
+ "value": "${KIE_SERVER_USER}"
+ },
+ {
+ "name": "KIE_SERVER_PASSWORD",
+ "value": "${KIE_SERVER_PASSWORD}"
+ },
+ {
+ "name": "KIE_SERVER_DOMAIN",
+ "value": "${KIE_SERVER_DOMAIN}"
+ },
+ {
+ "name": "KIE_SERVER_PERSISTENCE_DIALECT",
+ "value": "${KIE_SERVER_PERSISTENCE_DIALECT}"
+ },
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/processserver-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "processserver-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
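The ephemeral variant above differs from the persistent one mainly in dropping the PersistentVolumeClaim and the PostgreSQL data volume. A quick way to see what a template expects from the user is to list its parameters; the helper below is a hypothetical local script (the file name is an assumption) that only walks the JSON structure shown above.

import json

def summarize_parameters(path):
    # Print each parameter with its requirement flag and where its value comes from.
    with open(path) as f:
        params = json.load(f).get("parameters", [])
    for p in params:
        if p.get("generate") == "expression":
            source = "generated from %r" % p.get("from", "")
        elif p.get("value"):
            source = "default %r" % p["value"]
        else:
            source = "no default"
        flag = "required" if p.get("required") else "optional"
        print("%-35s %-9s %s" % (p["name"], flag, source))

# summarize_parameters("processserver63-postgresql-s2i.json")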
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso70-https.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso70-https.json
new file mode 100644
index 000000000..fb0578a67
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso70-https.json
@@ -0,0 +1,514 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for SSO 7.0",
+ "iconClass" : "icon-jboss",
+ "tags" : "sso,keycloak,java,jboss,xpaas",
+ "version" : "1.3.2"
+ },
+ "name": "sso70-https"
+ },
+ "labels": {
+ "template": "sso70-https",
+ "xpaas" : "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "sso",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "sso-service-account",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate (e.g. jboss)",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate (e.g. mykeystorepass)",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate (e.g. secret-key)",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate (e.g. password)",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "SSO Server admin username",
+ "name": "SSO_ADMIN_USERNAME",
+ "value": "admin",
+ "required": false
+ },
+ {
+ "description": "SSO Server admin password",
+ "name": "SSO_ADMIN_PASSWORD",
+ "value": "admin",
+ "required": false
+ },
+ {
+ "description": "Realm to be created in the SSO server (e.g. demo).",
+ "name": "SSO_REALM",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The username used to access the SSO service. This is used by clients to create the appliction client(s) within the specified SSO realm.",
+ "name": "SSO_SERVICE_USERNAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the SSO service user.",
+ "name": "SSO_SERVICE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the truststore file within the secret (e.g. truststore.jks)",
+ "name": "SSO_TRUSTSTORE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the truststore and certificate (e.g. mykeystorepass)",
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the truststore file (e.g. truststore-secret). Used for volume secretName",
+ "name": "SSO_TRUSTSTORE_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "redhat-sso70-openshift:1.3"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "sso-truststore-volume",
+ "mountPath": "/etc/sso-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "lifecycle": {
+ "preStop": {
+ "exec": {
+ "command": [
+ "/opt/eap/bin/jboss-cli.sh",
+ "-c",
+ ":shutdown(timeout=60)"
+ ]
+ }
+ }
+ },
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "SSO_ADMIN_USERNAME",
+ "value": "${SSO_ADMIN_USERNAME}"
+ },
+ {
+ "name": "SSO_ADMIN_PASSWORD",
+ "value": "${SSO_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "SSO_REALM",
+ "value": "${SSO_REALM}"
+ },
+ {
+ "name": "SSO_SERVICE_USERNAME",
+ "value": "${SSO_SERVICE_USERNAME}"
+ },
+ {
+ "name": "SSO_SERVICE_PASSWORD",
+ "value": "${SSO_SERVICE_PASSWORD}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE",
+ "value": "${SSO_TRUSTSTORE}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_DIR",
+ "value": "/etc/sso-secret-volume"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "${SSO_TRUSTSTORE_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ },
+ {
+ "name": "sso-truststore-volume",
+ "secret": {
+ "secretName": "${SSO_TRUSTSTORE_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
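The sso70-https deployment above mounts three secret-backed volumes (EAP keystore, JGroups keystore, SSO truststore), and each mount must resolve to a volume declared in the same pod spec. The sketch below is an illustrative sanity check, not a validator shipped with this repository; the file name is assumed.

import json

def check_volume_mounts(path):
    # For every DeploymentConfig, report whether each volumeMount has a matching pod volume.
    with open(path) as f:
        objects = json.load(f).get("objects", [])
    for obj in objects:
        if obj.get("kind") != "DeploymentConfig":
            continue
        pod = obj["spec"]["template"]["spec"]
        declared = {v["name"] for v in pod.get("volumes", [])}
        for container in pod.get("containers", []):
            for mount in container.get("volumeMounts", []):
                status = "ok" if mount["name"] in declared else "MISSING"
                print(obj["metadata"]["name"], mount["name"], status)

# check_volume_mounts("sso70-https.json")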
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso70-mysql-persistent.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso70-mysql-persistent.json
new file mode 100644
index 000000000..dcbb24bf1
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso70-mysql-persistent.json
@@ -0,0 +1,750 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for SSO 7.0 MySQL applications with persistent storage",
+ "iconClass" : "icon-jboss",
+ "tags" : "sso,keycloak,mysql,java,database,jboss,xpaas",
+ "version" : "1.3.2"
+ },
+ "name": "sso70-mysql-persistent"
+ },
+ "labels": {
+ "template": "sso70-mysql-persistent",
+ "xpaas" : "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "sso",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mysql",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/KeycloakDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "sso-service-account",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate (e.g. jboss)",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate (e.g. mykeystorepass)",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate (e.g. secret-key)",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate (e.g. password)",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "SSO Server admin username",
+ "name": "SSO_ADMIN_USERNAME",
+ "value": "admin",
+ "required": false
+ },
+ {
+ "description": "SSO Server admin password",
+ "name": "SSO_ADMIN_PASSWORD",
+ "value": "admin",
+ "required": false
+ },
+ {
+ "description": "Realm to be created in the SSO server (e.g. demo).",
+ "name": "SSO_REALM",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The username used to access the SSO service. This is used by clients to create the appliction client(s) within the specified SSO realm.",
+ "name": "SSO_SERVICE_USERNAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the SSO service user.",
+ "name": "SSO_SERVICE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the truststore file within the secret (e.g. truststore.jks)",
+ "name": "SSO_TRUSTSTORE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the truststore and certificate (e.g. mykeystorepass)",
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the truststore file (e.g. truststore-secret). Used for volume secretName",
+ "name": "SSO_TRUSTSTORE_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "redhat-sso70-openshift:1.3"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "sso-truststore-volume",
+ "mountPath": "/etc/sso-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "lifecycle": {
+ "preStop": {
+ "exec": {
+ "command": [
+ "/opt/eap/bin/jboss-cli.sh",
+ "-c",
+ ":shutdown(timeout=60)"
+ ]
+ }
+ }
+ },
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "SSO_ADMIN_USERNAME",
+ "value": "${SSO_ADMIN_USERNAME}"
+ },
+ {
+ "name": "SSO_ADMIN_PASSWORD",
+ "value": "${SSO_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "SSO_REALM",
+ "value": "${SSO_REALM}"
+ },
+ {
+ "name": "SSO_SERVICE_USERNAME",
+ "value": "${SSO_SERVICE_USERNAME}"
+ },
+ {
+ "name": "SSO_SERVICE_PASSWORD",
+ "value": "${SSO_SERVICE_PASSWORD}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE",
+ "value": "${SSO_TRUSTSTORE}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_DIR",
+ "value": "/etc/sso-secret-volume"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "${SSO_TRUSTSTORE_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ },
+ {
+ "name": "sso-truststore-volume",
+ "secret": {
+ "secretName": "${SSO_TRUSTSTORE_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/mysql/data",
+ "name": "${APPLICATION_NAME}-mysql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-mysql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-mysql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
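In the persistent MySQL variant above, the database pod mounts ${APPLICATION_NAME}-mysql-pvol, which must be backed by the ${APPLICATION_NAME}-mysql-claim PersistentVolumeClaim sized by VOLUME_CAPACITY. The snippet below is a hypothetical cross-check over the template JSON (the file name is an assumption) that confirms every claimName referenced by a pod volume is also declared as a PersistentVolumeClaim object in the same template.

import json

def check_pvc_references(path):
    with open(path) as f:
        objects = json.load(f).get("objects", [])
    claims = {o["metadata"]["name"] for o in objects
              if o.get("kind") == "PersistentVolumeClaim"}
    for obj in objects:
        if obj.get("kind") != "DeploymentConfig":
            continue
        for vol in obj["spec"]["template"]["spec"].get("volumes", []):
            claim = vol.get("persistentVolumeClaim", {}).get("claimName")
            if claim:
                print(claim, "declared" if claim in claims else "MISSING")

# check_pvc_references("sso70-mysql-persistent.json")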
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso70-mysql.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso70-mysql.json
new file mode 100644
index 000000000..1768f7a1b
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso70-mysql.json
@@ -0,0 +1,719 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for SSO 7.0 MySQL applications",
+ "iconClass" : "icon-jboss",
+ "tags" : "sso,keycloak,mysql,java,database,jboss,xpaas",
+ "version" : "1.3.2"
+ },
+ "name": "sso70-mysql"
+ },
+ "labels": {
+ "template": "sso70-mysql",
+ "xpaas" : "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "sso",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mysql",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/KeycloakDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "sso-service-account",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate (e.g. jboss)",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate (e.g. mykeystorepass)",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate (e.g. secret-key)",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate (e.g. password)",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "SSO Server admin username",
+ "name": "SSO_ADMIN_USERNAME",
+ "value": "admin",
+ "required": false
+ },
+ {
+ "description": "SSO Server admin password",
+ "name": "SSO_ADMIN_PASSWORD",
+ "value": "admin",
+ "required": false
+ },
+ {
+ "description": "Realm to be created in the SSO server (e.g. demo).",
+ "name": "SSO_REALM",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The username used to access the SSO service. This is used by clients to create the appliction client(s) within the specified SSO realm.",
+ "name": "SSO_SERVICE_USERNAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the SSO service user.",
+ "name": "SSO_SERVICE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the truststore file within the secret (e.g. truststore.jks)",
+ "name": "SSO_TRUSTSTORE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the truststore and certificate (e.g. mykeystorepass)",
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the truststore file (e.g. truststore-secret). Used for volume secretName",
+ "name": "SSO_TRUSTSTORE_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "database"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "redhat-sso70-openshift:1.3"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "sso-truststore-volume",
+ "mountPath": "/etc/sso-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "lifecycle": {
+ "preStop": {
+ "exec": {
+ "command": [
+ "/opt/eap/bin/jboss-cli.sh",
+ "-c",
+ ":shutdown(timeout=60)"
+ ]
+ }
+ }
+ },
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "SSO_ADMIN_USERNAME",
+ "value": "${SSO_ADMIN_USERNAME}"
+ },
+ {
+ "name": "SSO_ADMIN_PASSWORD",
+ "value": "${SSO_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "SSO_REALM",
+ "value": "${SSO_REALM}"
+ },
+ {
+ "name": "SSO_SERVICE_USERNAME",
+ "value": "${SSO_SERVICE_USERNAME}"
+ },
+ {
+ "name": "SSO_SERVICE_PASSWORD",
+ "value": "${SSO_SERVICE_PASSWORD}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE",
+ "value": "${SSO_TRUSTSTORE}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_DIR",
+ "value": "/etc/sso-secret-volume"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "${SSO_TRUSTSTORE_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ },
+ {
+ "name": "sso-truststore-volume",
+ "secret": {
+ "secretName": "${SSO_TRUSTSTORE_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "database"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}",
+ "component": "database"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso70-postgresql-persistent.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso70-postgresql-persistent.json
new file mode 100644
index 000000000..4c2f81f2e
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso70-postgresql-persistent.json
@@ -0,0 +1,727 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for SSO 7.0 PostgreSQL applications with persistent storage",
+ "iconClass" : "icon-jboss",
+ "tags" : "sso,keycloak,postrgresql,java,database,jboss,xpaas",
+ "version" : "1.3.2"
+ },
+ "name": "sso70-postgresql-persistent"
+ },
+ "labels": {
+ "template": "sso70-postgresql-persistent",
+ "xpaas" : "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "sso",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/KeycloakDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "sso-service-account",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate (e.g. jboss)",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate (e.g. mykeystorepass)",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate (e.g. secret-key)",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate (e.g. password)",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "SSO Server admin username",
+ "name": "SSO_ADMIN_USERNAME",
+ "value": "admin",
+ "required": false
+ },
+ {
+ "description": "SSO Server admin password",
+ "name": "SSO_ADMIN_PASSWORD",
+ "value": "admin",
+ "required": false
+ },
+ {
+ "description": "Realm to be created in the SSO server (e.g. demo).",
+ "name": "SSO_REALM",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The username used to access the SSO service. This is used by clients to create the appliction client(s) within the specified SSO realm.",
+ "name": "SSO_SERVICE_USERNAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the SSO service user.",
+ "name": "SSO_SERVICE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the truststore file within the secret (e.g. truststore.jks)",
+ "name": "SSO_TRUSTSTORE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the truststore and certificate (e.g. mykeystorepass)",
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the truststore file (e.g. truststore-secret). Used for volume secretName",
+ "name": "SSO_TRUSTSTORE_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "redhat-sso70-openshift:1.3"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "sso-truststore-volume",
+ "mountPath": "/etc/sso-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "lifecycle": {
+ "preStop": {
+ "exec": {
+ "command": [
+ "/opt/eap/bin/jboss-cli.sh",
+ "-c",
+ ":shutdown(timeout=60)"
+ ]
+ }
+ }
+ },
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "SSO_ADMIN_USERNAME",
+ "value": "${SSO_ADMIN_USERNAME}"
+ },
+ {
+ "name": "SSO_ADMIN_PASSWORD",
+ "value": "${SSO_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "SSO_REALM",
+ "value": "${SSO_REALM}"
+ },
+ {
+ "name": "SSO_SERVICE_USERNAME",
+ "value": "${SSO_SERVICE_USERNAME}"
+ },
+ {
+ "name": "SSO_SERVICE_PASSWORD",
+ "value": "${SSO_SERVICE_PASSWORD}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE",
+ "value": "${SSO_TRUSTSTORE}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_DIR",
+ "value": "/etc/sso-secret-volume"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "${SSO_TRUSTSTORE_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ },
+ {
+ "name": "sso-truststore-volume",
+ "secret": {
+ "secretName": "${SSO_TRUSTSTORE_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/pgsql/data",
+ "name": "${APPLICATION_NAME}-postgresql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_PREPARED_TRANSACTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-postgresql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso70-postgresql.json b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso70-postgresql.json
new file mode 100644
index 000000000..d8402ef72
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/xpaas-templates/sso70-postgresql.json
@@ -0,0 +1,696 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for SSO 7.0 PostgreSQL applications",
+ "iconClass" : "icon-jboss",
+ "tags" : "sso,keycloak,postrgresql,java,database,jboss,xpaas",
+ "version" : "1.3.2"
+ },
+ "name": "sso70-postgresql"
+ },
+ "labels": {
+ "template": "sso70-postgresql",
+ "xpaas" : "1.3.2"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "sso",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/KeycloakDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "sso-service-account",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate (e.g. jboss)",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate (e.g. mykeystorepass)",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate (e.g. secret-key)",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate (e.g. password)",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "description": "SSO Server admin username",
+ "name": "SSO_ADMIN_USERNAME",
+ "value": "admin",
+ "required": false
+ },
+ {
+ "description": "SSO Server admin password",
+ "name": "SSO_ADMIN_PASSWORD",
+ "value": "admin",
+ "required": false
+ },
+ {
+ "description": "Realm to be created in the SSO server (e.g. demo).",
+ "name": "SSO_REALM",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The username used to access the SSO service. This is used by clients to create the appliction client(s) within the specified SSO realm.",
+ "name": "SSO_SERVICE_USERNAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the SSO service user.",
+ "name": "SSO_SERVICE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the truststore file within the secret (e.g. truststore.jks)",
+ "name": "SSO_TRUSTSTORE",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the truststore and certificate (e.g. mykeystorepass)",
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the truststore file (e.g. truststore-secret). Used for volume secretName",
+ "name": "SSO_TRUSTSTORE_SECRET",
+ "value": "sso-app-secret",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "database"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "redhat-sso70-openshift:1.3"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}",
+ "component": "server"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "sso-truststore-volume",
+ "mountPath": "/etc/sso-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "lifecycle": {
+ "preStop": {
+ "exec": {
+ "command": [
+ "/opt/eap/bin/jboss-cli.sh",
+ "-c",
+ ":shutdown(timeout=60)"
+ ]
+ }
+ }
+ },
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "SSO_ADMIN_USERNAME",
+ "value": "${SSO_ADMIN_USERNAME}"
+ },
+ {
+ "name": "SSO_ADMIN_PASSWORD",
+ "value": "${SSO_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "SSO_REALM",
+ "value": "${SSO_REALM}"
+ },
+ {
+ "name": "SSO_SERVICE_USERNAME",
+ "value": "${SSO_SERVICE_USERNAME}"
+ },
+ {
+ "name": "SSO_SERVICE_PASSWORD",
+ "value": "${SSO_SERVICE_PASSWORD}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE",
+ "value": "${SSO_TRUSTSTORE}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_DIR",
+ "value": "/etc/sso-secret-volume"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "${SSO_TRUSTSTORE_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ },
+ {
+ "name": "sso-truststore-volume",
+ "secret": {
+ "secretName": "${SSO_TRUSTSTORE_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}",
+ "component": "database"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}",
+ "component": "database"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_PREPARED_TRANSACTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 41ae07a48..c99452062 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -7,13 +7,6 @@
"""Ansible module for retrieving and setting openshift related facts"""
-try:
- # python2
- import ConfigParser
-except ImportError:
- # python3
- import configparser as ConfigParser
-
# pylint: disable=no-name-in-module, import-error, wrong-import-order
import copy
import errno
@@ -26,6 +19,8 @@ import struct
import socket
from distutils.util import strtobool
from distutils.version import LooseVersion
+from six import string_types, text_type
+from six.moves import configparser
# ignore pylint errors related to the module_utils import
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
@@ -87,7 +82,7 @@ def migrate_docker_facts(facts):
# log_options was originally meant to be a comma separated string, but
# we now prefer an actual list, with backward compatibility:
if 'log_options' in facts['docker'] and \
- isinstance(facts['docker']['log_options'], basestring):
+ isinstance(facts['docker']['log_options'], string_types):
facts['docker']['log_options'] = facts['docker']['log_options'].split(",")
return facts
@@ -226,7 +221,7 @@ def choose_hostname(hostnames=None, fallback=''):
return hostname
ip_regex = r'\A\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\Z'
- ips = [i for i in hostnames if i is not None and isinstance(i, basestring) and re.match(ip_regex, i)]
+ ips = [i for i in hostnames if i is not None and isinstance(i, string_types) and re.match(ip_regex, i)]
hosts = [i for i in hostnames if i is not None and i != '' and i not in ips]
for host_list in (hosts, ips):
@@ -363,7 +358,7 @@ def normalize_aws_facts(metadata, facts):
var_map = {'ips': 'local-ipv4s', 'public_ips': 'public-ipv4s'}
for ips_var, int_var in iteritems(var_map):
ips = interface.get(int_var)
- if isinstance(ips, basestring):
+ if isinstance(ips, string_types):
int_info[ips_var] = [ips]
else:
int_info[ips_var] = ips
@@ -772,9 +767,9 @@ def set_etcd_facts_if_unset(facts):
# Read ETCD_DATA_DIR from /etc/etcd/etcd.conf:
try:
# Add a fake section for parsing:
- ini_str = unicode('[root]\n' + open('/etc/etcd/etcd.conf', 'r').read(), 'utf-8')
+ ini_str = text_type('[root]\n' + open('/etc/etcd/etcd.conf', 'r').read(), 'utf-8')
ini_fp = io.StringIO(ini_str)
- config = ConfigParser.RawConfigParser()
+ config = configparser.RawConfigParser()
config.readfp(ini_fp)
etcd_data_dir = config.get('root', 'ETCD_DATA_DIR')
if etcd_data_dir.startswith('"') and etcd_data_dir.endswith('"'):
@@ -872,6 +867,20 @@ def set_deployment_facts_if_unset(facts):
return facts
+def set_evacuate_or_drain_option(facts):
+ """OCP before 1.5/3.5 used '--evacuate'. As of 1.5/3.5 OCP uses
+'--drain'. Let's make that a fact for easy reference later.
+ """
+ if facts['common']['version_gte_3_5_or_1_5']:
+ # New-style
+ facts['common']['evacuate_or_drain'] = '--drain'
+ else:
+ # Old-style
+ facts['common']['evacuate_or_drain'] = '--evacuate'
+
+ return facts
+
+
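The new `evacuate_or_drain` fact above just keys off the 1.5/3.5 version flag. A minimal standalone sketch of that selection logic, using a hand-built facts dict rather than the real module (which depends on Ansible's module_utils), for illustration only:

```
# Standalone sketch of set_evacuate_or_drain_option(); illustrative only,
# the real function operates on the full openshift_facts structure.
def pick_evacuate_or_drain(facts):
    common = facts['common']
    # Before 1.5/3.5 the flag was '--evacuate'; 1.5/3.5 and later use '--drain'.
    common['evacuate_or_drain'] = '--drain' if common['version_gte_3_5_or_1_5'] else '--evacuate'
    return facts

old = {'common': {'version_gte_3_5_or_1_5': False}}
new = {'common': {'version_gte_3_5_or_1_5': True}}
print(pick_evacuate_or_drain(old)['common']['evacuate_or_drain'])  # --evacuate
print(pick_evacuate_or_drain(new)['common']['evacuate_or_drain'])  # --drain
```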
def set_version_facts_if_unset(facts):
""" Set version facts. This currently includes common.version and
common.version_gte_3_1_or_1_1.
@@ -894,23 +903,31 @@ def set_version_facts_if_unset(facts):
version_gte_3_2_or_1_2 = version >= LooseVersion('1.2.0')
version_gte_3_3_or_1_3 = version >= LooseVersion('1.3.0')
version_gte_3_4_or_1_4 = version >= LooseVersion('1.4.0')
+ version_gte_3_5_or_1_5 = version >= LooseVersion('1.5.0')
+ version_gte_3_6_or_1_6 = version >= LooseVersion('1.6.0')
else:
version_gte_3_1_or_1_1 = version >= LooseVersion('3.0.2.905')
version_gte_3_1_1_or_1_1_1 = version >= LooseVersion('3.1.1')
version_gte_3_2_or_1_2 = version >= LooseVersion('3.1.1.901')
version_gte_3_3_or_1_3 = version >= LooseVersion('3.3.0')
version_gte_3_4_or_1_4 = version >= LooseVersion('3.4.0')
+ version_gte_3_5_or_1_5 = version >= LooseVersion('3.5.0')
+ version_gte_3_6_or_1_6 = version >= LooseVersion('3.6.0')
else:
version_gte_3_1_or_1_1 = True
version_gte_3_1_1_or_1_1_1 = True
version_gte_3_2_or_1_2 = True
version_gte_3_3_or_1_3 = True
version_gte_3_4_or_1_4 = False
+ version_gte_3_5_or_1_5 = False
+ version_gte_3_6_or_1_6 = False
facts['common']['version_gte_3_1_or_1_1'] = version_gte_3_1_or_1_1
facts['common']['version_gte_3_1_1_or_1_1_1'] = version_gte_3_1_1_or_1_1_1
facts['common']['version_gte_3_2_or_1_2'] = version_gte_3_2_or_1_2
facts['common']['version_gte_3_3_or_1_3'] = version_gte_3_3_or_1_3
facts['common']['version_gte_3_4_or_1_4'] = version_gte_3_4_or_1_4
+ facts['common']['version_gte_3_5_or_1_5'] = version_gte_3_5_or_1_5
+ facts['common']['version_gte_3_6_or_1_6'] = version_gte_3_6_or_1_6
if version_gte_3_4_or_1_4:
examples_content_version = 'v1.4'
@@ -1229,10 +1246,10 @@ def build_api_server_args(facts):
def is_service_running(service):
""" Queries systemd through dbus to see if the service is running """
service_running = False
- bus = SystemBus()
- systemd = bus.get_object('org.freedesktop.systemd1', '/org/freedesktop/systemd1')
- manager = Interface(systemd, dbus_interface='org.freedesktop.systemd1.Manager')
try:
+ bus = SystemBus()
+ systemd = bus.get_object('org.freedesktop.systemd1', '/org/freedesktop/systemd1')
+ manager = Interface(systemd, dbus_interface='org.freedesktop.systemd1.Manager')
service_unit = service if service.endswith('.service') else manager.GetUnit('{0}.service'.format(service))
service_proxy = bus.get_object('org.freedesktop.systemd1', str(service_unit))
service_properties = Interface(service_proxy, dbus_interface='org.freedesktop.DBus.Properties')
@@ -1241,11 +1258,20 @@ def is_service_running(service):
if service_load_state == 'loaded' and service_active_state == 'active':
service_running = True
except DBusException:
+ # TODO: do not swallow exception, as it may be hiding useful debugging
+ # information.
pass
return service_running
+def rpm_rebuilddb():
+ """
+ Runs rpm --rebuilddb to ensure the db is in good shape.
+ """
+ module.run_command(['/usr/bin/rpm', '--rebuilddb']) # noqa: F405
+
+
def get_version_output(binary, version_cmd):
""" runs and returns the version output for a command """
cmd = []
@@ -1280,15 +1306,14 @@ def get_hosted_registry_insecure():
hosted_registry_insecure = None
if os.path.exists('/etc/sysconfig/docker'):
try:
- ini_str = unicode('[root]\n' + open('/etc/sysconfig/docker', 'r').read(), 'utf-8')
+ ini_str = text_type('[root]\n' + open('/etc/sysconfig/docker', 'r').read(), 'utf-8')
ini_fp = io.StringIO(ini_str)
- config = ConfigParser.RawConfigParser()
+ config = configparser.RawConfigParser()
config.readfp(ini_fp)
options = config.get('root', 'OPTIONS')
if 'insecure-registry' in options:
hosted_registry_insecure = True
- # pylint: disable=bare-except
- except:
+ except Exception: # pylint: disable=broad-except
pass
return hosted_registry_insecure
@@ -1440,7 +1465,9 @@ def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overw
# here, just completely overwrite with the new if they are present there.
inventory_json_facts = ['admission_plugin_config',
'kube_admission_plugin_config',
- 'image_policy_config']
+ 'image_policy_config',
+ "builddefaults",
+ "buildoverrides"]
facts = dict()
for key, value in iteritems(orig):
@@ -1449,7 +1476,7 @@ def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overw
if key in inventory_json_facts:
# Watchout for JSON facts that sometimes load as strings.
# (can happen if the JSON contains a boolean)
- if isinstance(new[key], basestring):
+ if isinstance(new[key], string_types):
facts[key] = yaml.safe_load(new[key])
else:
facts[key] = copy.deepcopy(new[key])
@@ -1511,7 +1538,7 @@ def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overw
for key in new_keys:
# Watchout for JSON facts that sometimes load as strings.
# (can happen if the JSON contains a boolean)
- if key in inventory_json_facts and isinstance(new[key], basestring):
+ if key in inventory_json_facts and isinstance(new[key], string_types):
facts[key] = yaml.safe_load(new[key])
else:
facts[key] = copy.deepcopy(new[key])
@@ -1552,15 +1579,15 @@ def get_local_facts_from_file(filename):
local_facts = dict()
try:
# Handle conversion of INI style facts file to json style
- ini_facts = ConfigParser.SafeConfigParser()
+ ini_facts = configparser.SafeConfigParser()
ini_facts.read(filename)
for section in ini_facts.sections():
local_facts[section] = dict()
for key, value in ini_facts.items(section):
local_facts[section][key] = value
- except (ConfigParser.MissingSectionHeaderError,
- ConfigParser.ParsingError):
+ except (configparser.MissingSectionHeaderError,
+ configparser.ParsingError):
try:
with open(filename, 'r') as facts_file:
local_facts = json.load(facts_file)
@@ -1600,11 +1627,7 @@ def safe_get_bool(fact):
def set_proxy_facts(facts):
- """ Set global proxy facts and promote defaults from http_proxy, https_proxy,
- no_proxy to the more specific builddefaults and builddefaults_git vars.
- 1. http_proxy, https_proxy, no_proxy
- 2. builddefaults_*
- 3. builddefaults_git_*
+ """ Set global proxy facts
Args:
facts(dict): existing facts
@@ -1614,7 +1637,7 @@ def set_proxy_facts(facts):
if 'common' in facts:
common = facts['common']
if 'http_proxy' in common or 'https_proxy' in common:
- if 'no_proxy' in common and isinstance(common['no_proxy'], basestring):
+ if 'no_proxy' in common and isinstance(common['no_proxy'], string_types):
common['no_proxy'] = common['no_proxy'].split(",")
elif 'no_proxy' not in common:
common['no_proxy'] = []
@@ -1626,6 +1649,21 @@ def set_proxy_facts(facts):
common['no_proxy'].append(common['hostname'])
common['no_proxy'] = sort_unique(common['no_proxy'])
facts['common'] = common
+ return facts
+
+
+def set_builddefaults_facts(facts):
+ """ Set build defaults including setting proxy values from http_proxy, https_proxy,
+ no_proxy to the more specific builddefaults and builddefaults_git vars.
+ 1. http_proxy, https_proxy, no_proxy
+ 2. builddefaults_*
+ 3. builddefaults_git_*
+
+ Args:
+ facts(dict): existing facts
+ Returns:
+ facts(dict): Updated facts with missing values
+ """
if 'builddefaults' in facts:
builddefaults = facts['builddefaults']
@@ -1635,24 +1673,71 @@ def set_proxy_facts(facts):
builddefaults['http_proxy'] = common['http_proxy']
if 'https_proxy' not in builddefaults and 'https_proxy' in common:
builddefaults['https_proxy'] = common['https_proxy']
- # make no_proxy into a list if it's not
- if 'no_proxy' in builddefaults and isinstance(builddefaults['no_proxy'], basestring):
- builddefaults['no_proxy'] = builddefaults['no_proxy'].split(",")
if 'no_proxy' not in builddefaults and 'no_proxy' in common:
builddefaults['no_proxy'] = common['no_proxy']
+
+ # Create git specific facts from generic values, if git specific values are
+ # not defined.
if 'git_http_proxy' not in builddefaults and 'http_proxy' in builddefaults:
builddefaults['git_http_proxy'] = builddefaults['http_proxy']
if 'git_https_proxy' not in builddefaults and 'https_proxy' in builddefaults:
builddefaults['git_https_proxy'] = builddefaults['https_proxy']
- # If we're actually defining a proxy config then create admission_plugin_config
- # if it doesn't exist, then merge builddefaults[config] structure
- # into admission_plugin_config
- if 'config' in builddefaults and ('http_proxy' in builddefaults or
- 'https_proxy' in builddefaults):
+ if 'git_no_proxy' not in builddefaults and 'no_proxy' in builddefaults:
+ builddefaults['git_no_proxy'] = builddefaults['no_proxy']
+ # If we're actually defining a builddefaults config then create admission_plugin_config
+ # then merge builddefaults[config] structure into admission_plugin_config
+ if 'config' in builddefaults:
if 'admission_plugin_config' not in facts['master']:
facts['master']['admission_plugin_config'] = dict()
facts['master']['admission_plugin_config'].update(builddefaults['config'])
- facts['builddefaults'] = builddefaults
+ # if the user didn't actually provide proxy values, delete the proxy env variable defaults.
+ delete_empty_keys(facts['master']['admission_plugin_config']['BuildDefaults']['configuration']['env'])
+
+ return facts
+
+
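A toy illustration of the proxy promotion that `set_builddefaults_facts()` performs (global proxy facts fall through to `builddefaults`, then to the `git_*` variants). This is a simplified sketch with a hypothetical helper name and leaves out the admission_plugin_config merge:

```
# Simplified sketch of the builddefaults proxy fallback; the helper name
# is hypothetical and not part of openshift_facts.py.
def promote_proxies(facts):
    common = facts.get('common', {})
    bd = facts.setdefault('builddefaults', {})
    # common -> builddefaults, only when the more specific value is unset
    for key in ('http_proxy', 'https_proxy', 'no_proxy'):
        if key not in bd and key in common:
            bd[key] = common[key]
    # builddefaults -> builddefaults git_*, same fallback rule
    for key in ('http_proxy', 'https_proxy', 'no_proxy'):
        if 'git_' + key not in bd and key in bd:
            bd['git_' + key] = bd[key]
    return facts

facts = {'common': {'http_proxy': 'http://proxy.example.com:3128'}}
print(promote_proxies(facts)['builddefaults']['git_http_proxy'])
# -> http://proxy.example.com:3128
```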
+def delete_empty_keys(keylist):
+ """ Delete dictionary elements from keylist where "value" is empty.
+
+ Args:
+ keylist(list): A list of builddefault configuration envs.
+
+ Returns:
+ none
+
+ Example:
+ keylist = [{'name': 'HTTP_PROXY', 'value': 'http://file.rdu.redhat.com:3128'},
+ {'name': 'HTTPS_PROXY', 'value': 'http://file.rdu.redhat.com:3128'},
+ {'name': 'NO_PROXY', 'value': ''}]
+
+ After calling delete_empty_keys the provided list is modified to become:
+
+ [{'name': 'HTTP_PROXY', 'value': 'http://file.rdu.redhat.com:3128'},
+ {'name': 'HTTPS_PROXY', 'value': 'http://file.rdu.redhat.com:3128'}]
+ """
+ count = 0
+ for i in range(0, len(keylist)):
+ if len(keylist[i - count]['value']) == 0:
+ del keylist[i - count]
+ count += 1
+
+
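`delete_empty_keys()` deletes from the list while iterating over its original index range, so the `count` offset compensates for elements already removed and keeps the loop from skipping entries. The same behaviour in a self-contained sketch (a filtering list comprehension would be an equally valid alternative):

```
# Self-contained sketch mirroring delete_empty_keys(): drop env entries
# whose value is empty, offsetting the index for in-place deletions.
def drop_empty_values(envs):
    removed = 0
    for i in range(len(envs)):
        if len(envs[i - removed]['value']) == 0:
            del envs[i - removed]
            removed += 1

envs = [{'name': 'HTTP_PROXY', 'value': 'http://proxy.example.com:3128'},
        {'name': 'NO_PROXY', 'value': ''}]
drop_empty_values(envs)
print([e['name'] for e in envs])  # ['HTTP_PROXY']
```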
+def set_buildoverrides_facts(facts):
+ """ Set build overrides
+
+ Args:
+ facts(dict): existing facts
+ Returns:
+ facts(dict): Updated facts with missing values
+ """
+ if 'buildoverrides' in facts:
+ buildoverrides = facts['buildoverrides']
+ # If we're actually defining a buildoverrides config then create admission_plugin_config
+ # then merge buildoverrides[config] structure into admission_plugin_config
+ if 'config' in buildoverrides:
+ if 'admission_plugin_config' not in facts['master']:
+ facts['master']['admission_plugin_config'] = dict()
+ facts['master']['admission_plugin_config'].update(buildoverrides['config'])
return facts
@@ -1791,6 +1876,7 @@ class OpenShiftFacts(object):
OpenShiftFactsUnsupportedRoleError:
"""
known_roles = ['builddefaults',
+ 'buildoverrides',
'clock',
'cloudprovider',
'common',
@@ -1889,11 +1975,14 @@ class OpenShiftFacts(object):
facts = build_controller_args(facts)
facts = build_api_server_args(facts)
facts = set_version_facts_if_unset(facts)
+ facts = set_evacuate_or_drain_option(facts)
facts = set_dnsmasq_facts_if_unset(facts)
facts = set_manageiq_facts_if_unset(facts)
facts = set_aggregate_facts(facts)
facts = set_etcd_facts_if_unset(facts)
facts = set_proxy_facts(facts)
+ facts = set_builddefaults_facts(facts)
+ facts = set_buildoverrides_facts(facts)
if not safe_get_bool(facts['common']['is_containerized']):
facts = set_installed_variant_rpm_facts(facts)
facts = set_nodename(facts)
@@ -1957,6 +2046,11 @@ class OpenShiftFacts(object):
if 'docker' in roles:
docker = dict(disable_push_dockerhub=False,
options='--log-driver=json-file --log-opt max-size=50m')
+ # NOTE: This is a workaround for a dnf output race condition that can occur in
+ # some situations. See https://bugzilla.redhat.com/show_bug.cgi?id=918184
+ if self.system_facts['ansible_pkg_mgr'] == 'dnf':
+ rpm_rebuilddb()
+
version_info = get_docker_version_info()
if version_info is not None:
docker['api_version'] = version_info['api_version']
@@ -2220,12 +2314,12 @@ class OpenShiftFacts(object):
key = '{0}_registries'.format(cat)
if key in new_local_facts['docker']:
val = new_local_facts['docker'][key]
- if isinstance(val, basestring):
+ if isinstance(val, string_types):
val = [x.strip() for x in val.split(',')]
new_local_facts['docker'][key] = list(set(val) - set(['']))
# Convert legacy log_options comma sep string to a list if present:
if 'log_options' in new_local_facts['docker'] and \
- isinstance(new_local_facts['docker']['log_options'], basestring):
+ isinstance(new_local_facts['docker']['log_options'], string_types):
new_local_facts['docker']['log_options'] = new_local_facts['docker']['log_options'].split(',')
new_local_facts = self.remove_empty_facts(new_local_facts)
diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml
index 70cf49dd4..b7b521f1a 100644
--- a/roles/openshift_facts/tasks/main.yml
+++ b/roles/openshift_facts/tasks/main.yml
@@ -10,11 +10,9 @@
- set_fact:
l_is_containerized: "{{ (l_is_atomic | bool) or (containerized | default(false) | bool) }}"
-- name: Ensure PyYaml and yum-utils are installed
+- name: Ensure various deps are installed
package: name={{ item }} state=present
- with_items:
- - PyYAML
- - yum-utils
+ with_items: "{{ required_packages }}"
when: not l_is_atomic | bool
- name: Gather Cluster facts and set is_containerized if needed
diff --git a/roles/openshift_facts/vars/main.yml b/roles/openshift_facts/vars/main.yml
new file mode 100644
index 000000000..9c3110ff6
--- /dev/null
+++ b/roles/openshift_facts/vars/main.yml
@@ -0,0 +1,7 @@
+---
+required_packages:
+ - iproute
+ - python-dbus
+ - python-six
+ - PyYAML
+ - yum-utils
diff --git a/roles/openshift_hosted/meta/main.yml b/roles/openshift_hosted/meta/main.yml
index 74c50ae1d..ca5e88b15 100644
--- a/roles/openshift_hosted/meta/main.yml
+++ b/roles/openshift_hosted/meta/main.yml
@@ -11,4 +11,23 @@ galaxy_info:
- 7
categories:
- cloud
-dependencies: []
+dependencies:
+- role: openshift_cli
+- role: openshift_hosted_facts
+- role: openshift_projects
+ openshift_projects: "{{ openshift_additional_projects | default({}) | oo_merge_dicts({'default':{'default_node_selector':''},'openshift-infra':{'default_node_selector':''},'logging':{'default_node_selector':''}}) }}"
+- role: openshift_serviceaccounts
+ openshift_serviceaccounts_names:
+ - router
+ openshift_serviceaccounts_namespace: default
+ openshift_serviceaccounts_sccs:
+ - hostnetwork
+ when: openshift.common.version_gte_3_2_or_1_2
+- role: openshift_serviceaccounts
+ openshift_serviceaccounts_names:
+ - router
+ - registry
+ openshift_serviceaccounts_namespace: default
+ openshift_serviceaccounts_sccs:
+ - privileged
+ when: not openshift.common.version_gte_3_2_or_1_2
diff --git a/roles/openshift_hosted/tasks/registry/secure.yml b/roles/openshift_hosted/tasks/registry/secure.yml
index b6d007835..d87a3847c 100644
--- a/roles/openshift_hosted/tasks/registry/secure.yml
+++ b/roles/openshift_hosted/tasks/registry/secure.yml
@@ -34,9 +34,9 @@
- name: Create registry certificates if they do not exist
command: >
{{ openshift.common.client_binary }} adm ca create-server-cert
- --signer-cert=/etc/origin/master/ca.crt
- --signer-key=/etc/origin/master/ca.key
- --signer-serial=/etc/origin/master/ca.serial.txt
+ --signer-cert={{ openshift_master_config_dir }}/ca.crt
+ --signer-key={{ openshift_master_config_dir }}/ca.key
+ --signer-serial={{ openshift_master_config_dir }}/ca.serial.txt
--hostnames="{{ docker_registry_service_ip.stdout }},docker-registry.default.svc.cluster.local,{{ docker_registry_route_hostname }}"
--cert={{ openshift_master_config_dir }}/registry.crt
--key={{ openshift_master_config_dir }}/registry.key
@@ -65,12 +65,12 @@
- name: Determine if registry-certificates secret volume attached
command: >
{{ openshift.common.client_binary }} get dc/docker-registry
- -o jsonpath='{.spec.template.spec.volumes[*].secret.secretName}'
+ -o jsonpath='{.spec.template.spec.volumes[?(@.secret)].secret.secretName}'
--config={{ openshift_hosted_kubeconfig }}
-n default
register: docker_registry_volumes
changed_when: false
- failed_when: "'secretName is not found' not in docker_registry_volumes.stdout and docker_registry_volumes.rc != 0"
+ failed_when: "docker_registry_volumes.stdout != '' and 'secretName is not found' not in docker_registry_volumes.stdout and docker_registry_volumes.rc != 0"
- name: Attach registry-certificates secret volume
command: >
diff --git a/roles/openshift_hosted_metrics/README.md b/roles/openshift_hosted_metrics/README.md
new file mode 100644
index 000000000..c2af3c494
--- /dev/null
+++ b/roles/openshift_hosted_metrics/README.md
@@ -0,0 +1,54 @@
+OpenShift Metrics with Hawkular
+====================
+
+OpenShift Metrics Installation
+
+Requirements
+------------
+
+* Ansible 2.2
+* Requires the subdomain FQDN to be set.
+* If persistence is enabled, NFS is also required.
+
+Role Variables
+--------------
+
+From this role:
+
+| Name | Default value | Description |
+|-------------------------------------------------|-----------------------|-------------------------------------------------------------|
+| openshift_hosted_metrics_deploy | `False` | If metrics should be deployed |
+| openshift_hosted_metrics_public_url | null | Hawkular metrics public url |
+| openshift_hosted_metrics_storage_nfs_directory | `/exports` | Root export directory. |
+| openshift_hosted_metrics_storage_volume_name | `metrics` | Metrics volume within openshift_hosted_metrics_volume_dir |
+| openshift_hosted_metrics_storage_volume_size | `10Gi` | Metrics volume size |
+| openshift_hosted_metrics_storage_nfs_options | `*(rw,root_squash)` | NFS options for configured exports. |
+| openshift_hosted_metrics_duration | `7` | Metrics query duration |
+| openshift_hosted_metrics_resolution | `10s` | Metrics resolution |
+
+
+Dependencies
+------------
+openshift_facts
+openshift_examples
+openshift_master_facts
+
+Example Playbook
+----------------
+
+```
+- name: Configure openshift-metrics
+ hosts: oo_first_master
+ roles:
+ - role: openshift_hosted_metrics
+```
+
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
+
+Jose David Martín (j.david.nieto@gmail.com)
diff --git a/roles/openshift_metrics/defaults/main.yml b/roles/openshift_hosted_metrics/defaults/main.yml
index a01f24df8..a01f24df8 100644
--- a/roles/openshift_metrics/defaults/main.yml
+++ b/roles/openshift_hosted_metrics/defaults/main.yml
diff --git a/roles/openshift_metrics/handlers/main.yml b/roles/openshift_hosted_metrics/handlers/main.yml
index 69c5a1663..69c5a1663 100644
--- a/roles/openshift_metrics/handlers/main.yml
+++ b/roles/openshift_hosted_metrics/handlers/main.yml
diff --git a/roles/openshift_hosted_metrics/meta/main.yaml b/roles/openshift_hosted_metrics/meta/main.yaml
new file mode 100644
index 000000000..debca3ca6
--- /dev/null
+++ b/roles/openshift_hosted_metrics/meta/main.yaml
@@ -0,0 +1,18 @@
+---
+galaxy_info:
+ author: David Martín
+ description:
+ company:
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+ - system
+dependencies:
+- { role: openshift_examples }
+- { role: openshift_facts }
+- { role: openshift_master_facts }
diff --git a/roles/openshift_metrics/tasks/install.yml b/roles/openshift_hosted_metrics/tasks/install.yml
index 0f520e685..2c839996e 100644
--- a/roles/openshift_metrics/tasks/install.yml
+++ b/roles/openshift_hosted_metrics/tasks/install.yml
@@ -3,7 +3,7 @@
- name: Test if metrics-deployer service account exists
command: >
{{ openshift.common.client_binary }}
- --config={{ openshift_metrics_kubeconfig }}
+ --config={{ openshift_hosted_metrics_kubeconfig }}
--namespace=openshift-infra
get serviceaccount metrics-deployer -o json
register: serviceaccount
@@ -14,7 +14,7 @@
shell: >
echo {{ metrics_deployer_sa | to_json | quote }} |
{{ openshift.common.client_binary }}
- --config={{ openshift_metrics_kubeconfig }}
+ --config={{ openshift_hosted_metrics_kubeconfig }}
--namespace openshift-infra
create -f -
when: serviceaccount.rc == 1
@@ -22,7 +22,7 @@
- name: Test edit permissions
command: >
{{ openshift.common.client_binary }}
- --config={{ openshift_metrics_kubeconfig }}
+ --config={{ openshift_hosted_metrics_kubeconfig }}
--namespace openshift-infra
get rolebindings -o jsonpath='{.items[?(@.metadata.name == "edit")].userNames}'
register: edit_rolebindings
@@ -31,7 +31,7 @@
- name: Add edit permission to the openshift-infra project to metrics-deployer SA
command: >
{{ openshift.common.client_binary }} adm
- --config={{ openshift_metrics_kubeconfig }}
+ --config={{ openshift_hosted_metrics_kubeconfig }}
--namespace openshift-infra
policy add-role-to-user edit
system:serviceaccount:openshift-infra:metrics-deployer
@@ -40,7 +40,7 @@
- name: Test hawkular view permissions
command: >
{{ openshift.common.client_binary }}
- --config={{ openshift_metrics_kubeconfig }}
+ --config={{ openshift_hosted_metrics_kubeconfig }}
--namespace openshift-infra
get rolebindings -o jsonpath='{.items[?(@.metadata.name == "view")].userNames}'
register: view_rolebindings
@@ -49,7 +49,7 @@
- name: Add view permissions to hawkular SA
command: >
{{ openshift.common.client_binary }} adm
- --config={{ openshift_metrics_kubeconfig }}
+ --config={{ openshift_hosted_metrics_kubeconfig }}
--namespace openshift-infra
policy add-role-to-user view
system:serviceaccount:openshift-infra:hawkular
@@ -58,7 +58,7 @@
- name: Test cluster-reader permissions
command: >
{{ openshift.common.client_binary }}
- --config={{ openshift_metrics_kubeconfig }}
+ --config={{ openshift_hosted_metrics_kubeconfig }}
--namespace openshift-infra
get clusterrolebindings -o jsonpath='{.items[?(@.metadata.name == "cluster-reader")].userNames}'
register: cluster_reader_clusterrolebindings
@@ -67,7 +67,7 @@
- name: Add cluster-reader permission to the openshift-infra project to heapster SA
command: >
{{ openshift.common.client_binary }} adm
- --config={{ openshift_metrics_kubeconfig }}
+ --config={{ openshift_hosted_metrics_kubeconfig }}
--namespace openshift-infra
policy add-cluster-role-to-user cluster-reader
system:serviceaccount:openshift-infra:heapster
@@ -76,7 +76,7 @@
- name: Create metrics-deployer secret
command: >
{{ openshift.common.client_binary }}
- --config={{ openshift_metrics_kubeconfig }}
+ --config={{ openshift_hosted_metrics_kubeconfig }}
--namespace openshift-infra
secrets new metrics-deployer nothing=/dev/null
register: metrics_deployer_secret
@@ -98,7 +98,7 @@
{{ image_version }} \
-v MODE={{ deployment_mode }} \
| {{ openshift.common.client_binary }} --namespace openshift-infra \
- --config={{ openshift_metrics_kubeconfig }} \
+ --config={{ openshift_hosted_metrics_kubeconfig }} \
create -o name -f -"
- name: Deploy Metrics
@@ -116,7 +116,7 @@
shell: >
{{ openshift.common.client_binary }}
--namespace openshift-infra
- --config={{ openshift_metrics_kubeconfig }}
+ --config={{ openshift_hosted_metrics_kubeconfig }}
get {{ deploy_metrics.stdout }}
register: deploy_result
until: "{{ 'Completed' in deploy_result.stdout }}"
diff --git a/roles/openshift_hosted_metrics/tasks/main.yaml b/roles/openshift_hosted_metrics/tasks/main.yaml
new file mode 100644
index 000000000..5ce8aa92b
--- /dev/null
+++ b/roles/openshift_hosted_metrics/tasks/main.yaml
@@ -0,0 +1,75 @@
+---
+- name: Create temp directory for kubeconfig
+ command: mktemp -d /tmp/openshift-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
+
+- name: Record kubeconfig tmp dir
+ set_fact:
+ openshift_hosted_metrics_kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
+
+- name: Copy the admin client config(s)
+ command: >
+ cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ openshift_hosted_metrics_kubeconfig }}
+ changed_when: False
+
+- name: Set hosted metrics facts
+ openshift_facts:
+ role: hosted
+ openshift_env: "{{ hostvars
+ | oo_merge_hostvars(vars, inventory_hostname)
+ | oo_openshift_env }}"
+ openshift_env_structures:
+ - 'openshift.hosted.metrics.*'
+
+- set_fact:
+ metrics_persistence: "{{ openshift.hosted.metrics.storage_kind | default(none) is not none }}"
+ metrics_dynamic_vol: "{{ openshift.hosted.metrics.storage_kind | default(none) == 'dynamic' }}"
+ metrics_template_dir: "{{ openshift.common.config_base if openshift.common.is_containerized | bool else '/usr/share/openshift' }}/examples/infrastructure-templates/{{ 'origin' if deployment_type == 'origin' else 'enterprise' }}"
+ image_prefix: "{{ '-v IMAGE_PREFIX=' ~ openshift.hosted.metrics.deployer.prefix if 'prefix' in openshift.hosted.metrics.deployer else '' }}"
+ image_version: "{{ '-v IMAGE_VERSION=' ~ openshift.hosted.metrics.deployer.version if 'version' in openshift.hosted.metrics.deployer else '' }}"
+
+
+- name: Check for existing metrics pods
+ shell: >
+ {{ openshift.common.client_binary }}
+ --config={{ openshift_hosted_metrics_kubeconfig }}
+ --namespace openshift-infra
+ get pods -l {{ item }} | grep -q Running
+ register: metrics_pods_status
+ with_items:
+ - metrics-infra=hawkular-metrics
+ - metrics-infra=heapster
+ - metrics-infra=hawkular-cassandra
+ failed_when: false
+ changed_when: false
+
+- name: Check for previous deployer
+ shell: >
+ {{ openshift.common.client_binary }}
+ --config={{ openshift_hosted_metrics_kubeconfig }}
+ --namespace openshift-infra
+ get pods -l metrics-infra=deployer --sort-by='{.metadata.creationTimestamp}' | tail -1 | grep metrics-deployer-
+ register: metrics_deployer_status
+ failed_when: false
+ changed_when: false
+
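+# greenfield: no previous metrics deployer pod was found (the grep above returned
+# non-zero); failed_error: the most recent deployer pod reported an Error;
+# metrics_running: all three metrics pods (hawkular-metrics, heapster and
+# hawkular-cassandra) are Running.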
+- name: Record current deployment status
+ set_fact:
+ greenfield: "{{ not metrics_deployer_status.rc == 0 }}"
+ failed_error: "{{ True if 'Error' in metrics_deployer_status.stdout else False }}"
+ metrics_running: "{{ metrics_pods_status.results | oo_collect(attribute='rc') == [0,0,0] }}"
+
+- name: Set deployment mode
+ set_fact:
+ deployment_mode: "{{ 'refresh' if (failed_error | bool or metrics_upgrade | bool) else 'deploy' }}"
+
+# TODO: handle non greenfield deployments in the future
+- include: install.yml
+ when: greenfield
+
+- name: Delete temp directory
+ file:
+ name: "{{ mktemp.stdout }}"
+ state: absent
+ changed_when: False
diff --git a/roles/openshift_hosted_metrics/vars/main.yaml b/roles/openshift_hosted_metrics/vars/main.yaml
new file mode 100644
index 000000000..6c207d6ac
--- /dev/null
+++ b/roles/openshift_hosted_metrics/vars/main.yaml
@@ -0,0 +1,21 @@
+---
+hawkular_permission_oc_commands:
+ - policy add-role-to-user edit system:serviceaccount:openshift-infra:metrics-deployer -n openshift-infra
+ - policy add-cluster-role-to-user cluster-admin system:serviceaccount:openshift-infra:heapster
+
+metrics_deployer_sa:
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: metrics-deployer
+ secrets:
+ - name: metrics-deployer
+
+
+hawkular_tmp_conf: /tmp/hawkular_admin.kubeconfig
+
+hawkular_persistence: "{% if openshift.hosted.metrics.storage.kind != None %}true{% else %}false{% endif %}"
+
+hawkular_type: "{{ 'origin' if deployment_type == 'origin' else 'enterprise' }}"
+
+metrics_upgrade: "{{ openshift.hosted.metrics.upgrade | default(False) }}"
diff --git a/roles/openshift_hosted_templates/files/v1.3/enterprise/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.3/enterprise/logging-deployer.yaml
index 13cef2d66..c47d5361d 100644
--- a/roles/openshift_hosted_templates/files/v1.3/enterprise/logging-deployer.yaml
+++ b/roles/openshift_hosted_templates/files/v1.3/enterprise/logging-deployer.yaml
@@ -72,7 +72,6 @@ items:
metadata:
name: logging-deployer-edit-role
roleRef:
- kind: ClusterRole
name: edit
subjects:
- kind: ServiceAccount
@@ -83,7 +82,6 @@ items:
metadata:
name: logging-deployer-dsadmin-role
roleRef:
- kind: ClusterRole
name: daemonset-admin
subjects:
- kind: ServiceAccount
diff --git a/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml
index ddfda1272..c67058696 100644
--- a/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml
+++ b/roles/openshift_hosted_templates/files/v1.4/enterprise/logging-deployer.yaml
@@ -81,7 +81,6 @@ items:
metadata:
name: logging-deployer-edit-role
roleRef:
- kind: ClusterRole
name: edit
subjects:
- kind: ServiceAccount
@@ -92,7 +91,6 @@ items:
metadata:
name: logging-deployer-dsadmin-role
roleRef:
- kind: ClusterRole
name: daemonset-admin
subjects:
- kind: ServiceAccount
@@ -103,7 +101,6 @@ items:
metadata:
name: logging-elasticsearch-view-role
roleRef:
- kind: ClusterRole
name: view
subjects:
- kind: ServiceAccount
diff --git a/roles/openshift_hosted_templates/files/v1.5/enterprise/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.5/enterprise/logging-deployer.yaml
new file mode 100644
index 000000000..c67058696
--- /dev/null
+++ b/roles/openshift_hosted_templates/files/v1.5/enterprise/logging-deployer.yaml
@@ -0,0 +1,342 @@
+apiVersion: "v1"
+kind: "List"
+items:
+-
+ apiVersion: "v1"
+ kind: "Template"
+ metadata:
+ name: logging-deployer-account-template
+ annotations:
+ description: "Template for creating the deployer account and roles needed for the aggregated logging deployer. Create as cluster-admin."
+ tags: "infrastructure"
+ objects:
+ -
+ apiVersion: v1
+ kind: ServiceAccount
+ name: logging-deployer
+ metadata:
+ name: logging-deployer
+ labels:
+ logging-infra: deployer
+ provider: openshift
+ component: deployer
+ -
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: aggregated-logging-kibana
+ -
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: aggregated-logging-elasticsearch
+ -
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: aggregated-logging-fluentd
+ -
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: aggregated-logging-curator
+ - apiVersion: v1
+ kind: ClusterRole
+ metadata:
+ name: oauth-editor
+ rules:
+ - resources:
+ - oauthclients
+ verbs:
+ - create
+ - delete
+ - apiVersion: v1
+ kind: ClusterRole
+ metadata:
+ name: daemonset-admin
+ rules:
+ - resources:
+ - daemonsets
+ apiGroups:
+ - extensions
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - delete
+ - update
+ - apiVersion: v1
+ kind: ClusterRole
+ metadata:
+ name: rolebinding-reader
+ rules:
+ - resources:
+ - clusterrolebindings
+ verbs:
+ - get
+ -
+ apiVersion: v1
+ kind: RoleBinding
+ metadata:
+ name: logging-deployer-edit-role
+ roleRef:
+ name: edit
+ subjects:
+ - kind: ServiceAccount
+ name: logging-deployer
+ -
+ apiVersion: v1
+ kind: RoleBinding
+ metadata:
+ name: logging-deployer-dsadmin-role
+ roleRef:
+ name: daemonset-admin
+ subjects:
+ - kind: ServiceAccount
+ name: logging-deployer
+ -
+ apiVersion: v1
+ kind: RoleBinding
+ metadata:
+ name: logging-elasticsearch-view-role
+ roleRef:
+ name: view
+ subjects:
+ - kind: ServiceAccount
+ name: aggregated-logging-elasticsearch
+-
+ apiVersion: "v1"
+ kind: "Template"
+ metadata:
+ name: logging-deployer-template
+ annotations:
+ description: "Template for running the aggregated logging deployer in a pod. Requires empowered 'logging-deployer' service account."
+ tags: "infrastructure"
+ labels:
+ logging-infra: deployer
+ provider: openshift
+ objects:
+ -
+ apiVersion: v1
+ kind: Pod
+ metadata:
+ generateName: logging-deployer-
+ spec:
+ containers:
+ - image: ${IMAGE_PREFIX}logging-deployer:${IMAGE_VERSION}
+ imagePullPolicy: Always
+ name: deployer
+ volumeMounts:
+ - name: empty
+ mountPath: /etc/deploy
+ env:
+ - name: PROJECT
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: IMAGE_PREFIX
+ value: ${IMAGE_PREFIX}
+ - name: IMAGE_VERSION
+ value: ${IMAGE_VERSION}
+ - name: IMAGE_PULL_SECRET
+ value: ${IMAGE_PULL_SECRET}
+ - name: INSECURE_REGISTRY
+ value: ${INSECURE_REGISTRY}
+ - name: ENABLE_OPS_CLUSTER
+ value: ${ENABLE_OPS_CLUSTER}
+ - name: KIBANA_HOSTNAME
+ value: ${KIBANA_HOSTNAME}
+ - name: KIBANA_OPS_HOSTNAME
+ value: ${KIBANA_OPS_HOSTNAME}
+ - name: PUBLIC_MASTER_URL
+ value: ${PUBLIC_MASTER_URL}
+ - name: MASTER_URL
+ value: ${MASTER_URL}
+ - name: ES_INSTANCE_RAM
+ value: ${ES_INSTANCE_RAM}
+ - name: ES_PVC_SIZE
+ value: ${ES_PVC_SIZE}
+ - name: ES_PVC_PREFIX
+ value: ${ES_PVC_PREFIX}
+ - name: ES_PVC_DYNAMIC
+ value: ${ES_PVC_DYNAMIC}
+ - name: ES_CLUSTER_SIZE
+ value: ${ES_CLUSTER_SIZE}
+ - name: ES_NODE_QUORUM
+ value: ${ES_NODE_QUORUM}
+ - name: ES_RECOVER_AFTER_NODES
+ value: ${ES_RECOVER_AFTER_NODES}
+ - name: ES_RECOVER_EXPECTED_NODES
+ value: ${ES_RECOVER_EXPECTED_NODES}
+ - name: ES_RECOVER_AFTER_TIME
+ value: ${ES_RECOVER_AFTER_TIME}
+ - name: ES_OPS_INSTANCE_RAM
+ value: ${ES_OPS_INSTANCE_RAM}
+ - name: ES_OPS_PVC_SIZE
+ value: ${ES_OPS_PVC_SIZE}
+ - name: ES_OPS_PVC_PREFIX
+ value: ${ES_OPS_PVC_PREFIX}
+ - name: ES_OPS_PVC_DYNAMIC
+ value: ${ES_OPS_PVC_DYNAMIC}
+ - name: ES_OPS_CLUSTER_SIZE
+ value: ${ES_OPS_CLUSTER_SIZE}
+ - name: ES_OPS_NODE_QUORUM
+ value: ${ES_OPS_NODE_QUORUM}
+ - name: ES_OPS_RECOVER_AFTER_NODES
+ value: ${ES_OPS_RECOVER_AFTER_NODES}
+ - name: ES_OPS_RECOVER_EXPECTED_NODES
+ value: ${ES_OPS_RECOVER_EXPECTED_NODES}
+ - name: ES_OPS_RECOVER_AFTER_TIME
+ value: ${ES_OPS_RECOVER_AFTER_TIME}
+ - name: FLUENTD_NODESELECTOR
+ value: ${FLUENTD_NODESELECTOR}
+ - name: ES_NODESELECTOR
+ value: ${ES_NODESELECTOR}
+ - name: ES_OPS_NODESELECTOR
+ value: ${ES_OPS_NODESELECTOR}
+ - name: KIBANA_NODESELECTOR
+ value: ${KIBANA_NODESELECTOR}
+ - name: KIBANA_OPS_NODESELECTOR
+ value: ${KIBANA_OPS_NODESELECTOR}
+ - name: CURATOR_NODESELECTOR
+ value: ${CURATOR_NODESELECTOR}
+ - name: CURATOR_OPS_NODESELECTOR
+ value: ${CURATOR_OPS_NODESELECTOR}
+ - name: MODE
+ value: ${MODE}
+ dnsPolicy: ClusterFirst
+ restartPolicy: Never
+ serviceAccount: logging-deployer
+ volumes:
+ - name: empty
+ emptyDir: {}
+ parameters:
+ -
+ description: "The mode that the deployer runs in."
+ name: MODE
+ value: "install"
+ -
+ description: 'Specify prefix for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.4.0", set prefix "registry.access.redhat.com/openshift3/"'
+ name: IMAGE_PREFIX
+ value: "registry.access.redhat.com/openshift3/"
+ -
+ description: 'Specify version for logging components; e.g. for "registry.access.redhat.com/openshift3/logging-deployer:3.4.0", set version "3.4.0"'
+ name: IMAGE_VERSION
+ value: "3.4.0"
+ -
+ description: "(Deprecated) Specify the name of an existing pull secret to be used for pulling component images from an authenticated registry."
+ name: IMAGE_PULL_SECRET
+ -
+ description: "(Deprecated) Allow the registry for logging component images to be non-secure (not secured with a certificate signed by a known CA)"
+ name: INSECURE_REGISTRY
+ value: "false"
+ -
+ description: "(Deprecated) If true, set up to use a second ES cluster for ops logs."
+ name: ENABLE_OPS_CLUSTER
+ value: "false"
+ -
+ description: "(Deprecated) External hostname where clients will reach kibana"
+ name: KIBANA_HOSTNAME
+ value: "kibana.example.com"
+ -
+ description: "(Deprecated) External hostname at which admins will visit the ops Kibana."
+ name: KIBANA_OPS_HOSTNAME
+ value: kibana-ops.example.com
+ -
+ description: "(Deprecated) External URL for the master, for OAuth purposes"
+ name: PUBLIC_MASTER_URL
+ value: "https://localhost:8443"
+ -
+ description: "(Deprecated) Internal URL for the master, for authentication retrieval"
+ name: MASTER_URL
+ value: "https://kubernetes.default.svc.cluster.local"
+ -
+ description: "(Deprecated) How many instances of ElasticSearch to deploy."
+ name: ES_CLUSTER_SIZE
+ value: "1"
+ -
+ description: "(Deprecated) Amount of RAM to reserve per ElasticSearch instance."
+ name: ES_INSTANCE_RAM
+ value: "8G"
+ -
+ description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead."
+ name: ES_PVC_SIZE
+ -
+ description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_PVC_SIZE."
+ name: ES_PVC_PREFIX
+ value: "logging-es-"
+ -
+ description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES PVC. '
+ name: ES_PVC_DYNAMIC
+ -
+ description: "(Deprecated) Number of nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
+ name: ES_NODE_QUORUM
+ -
+ description: "(Deprecated) Number of nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_CLUSTER_SIZE."
+ name: ES_RECOVER_AFTER_NODES
+ -
+ description: "(Deprecated) Number of nodes desired to be present before the cluster will recover from a full restart. By default, ES_CLUSTER_SIZE."
+ name: ES_RECOVER_EXPECTED_NODES
+ -
+ description: "(Deprecated) Timeout for *expected* nodes to be present when cluster is recovering from a full restart."
+ name: ES_RECOVER_AFTER_TIME
+ value: "5m"
+ -
+ description: "(Deprecated) How many ops instances of ElasticSearch to deploy. By default, ES_CLUSTER_SIZE."
+ name: ES_OPS_CLUSTER_SIZE
+ -
+ description: "(Deprecated) Amount of RAM to reserve per ops ElasticSearch instance."
+ name: ES_OPS_INSTANCE_RAM
+ value: "8G"
+ -
+ description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch ops instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead."
+ name: ES_OPS_PVC_SIZE
+ -
+ description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_OPS_PVC_SIZE."
+ name: ES_OPS_PVC_PREFIX
+ value: "logging-es-ops-"
+ -
+ description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES ops PVC. '
+ name: ES_OPS_PVC_DYNAMIC
+ -
+ description: "(Deprecated) Number of ops nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
+ name: ES_OPS_NODE_QUORUM
+ -
+ description: "(Deprecated) Number of ops nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_OPS_CLUSTER_SIZE."
+ name: ES_OPS_RECOVER_AFTER_NODES
+ -
+ description: "(Deprecated) Number of ops nodes desired to be present before the cluster will recover from a full restart. By default, ES_OPS_CLUSTER_SIZE."
+ name: ES_OPS_RECOVER_EXPECTED_NODES
+ -
+ description: "(Deprecated) Timeout for *expected* ops nodes to be present when cluster is recovering from a full restart."
+ name: ES_OPS_RECOVER_AFTER_TIME
+ value: "5m"
+ -
+ description: "(Deprecated) The nodeSelector used for the Fluentd DaemonSet."
+ name: FLUENTD_NODESELECTOR
+ value: "logging-infra-fluentd=true"
+ -
+ description: "(Deprecated) Node selector Elasticsearch cluster (label=value)."
+ name: ES_NODESELECTOR
+ value: ""
+ -
+ description: "(Deprecated) Node selector Elasticsearch operations cluster (label=value)."
+ name: ES_OPS_NODESELECTOR
+ value: ""
+ -
+ description: "(Deprecated) Node selector Kibana cluster (label=value)."
+ name: KIBANA_NODESELECTOR
+ value: ""
+ -
+ description: "(Deprecated) Node selector Kibana operations cluster (label=value)."
+ name: KIBANA_OPS_NODESELECTOR
+ value: ""
+ -
+ description: "(Deprecated) Node selector Curator (label=value)."
+ name: CURATOR_NODESELECTOR
+ value: ""
+ -
+ description: "(Deprecated) Node selector operations Curator (label=value)."
+ name: CURATOR_OPS_NODESELECTOR
+ value: ""
diff --git a/roles/openshift_hosted_templates/files/v1.5/enterprise/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.5/enterprise/metrics-deployer.yaml
new file mode 100644
index 000000000..66051755c
--- /dev/null
+++ b/roles/openshift_hosted_templates/files/v1.5/enterprise/metrics-deployer.yaml
@@ -0,0 +1,168 @@
+#!/bin/bash
+#
+# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+apiVersion: "v1"
+kind: "Template"
+metadata:
+ name: metrics-deployer-template
+ annotations:
+ description: "Template for deploying the required Metrics integration. Requires cluster-admin 'metrics-deployer' service account and 'metrics-deployer' secret."
+ tags: "infrastructure"
+labels:
+ metrics-infra: deployer
+ provider: openshift
+ component: deployer
+objects:
+-
+ apiVersion: v1
+ kind: Pod
+ metadata:
+ generateName: metrics-deployer-
+ spec:
+ securityContext: {}
+ containers:
+ - image: ${IMAGE_PREFIX}metrics-deployer:${IMAGE_VERSION}
+ name: deployer
+ securityContext: {}
+ volumeMounts:
+ - name: secret
+ mountPath: /secret
+ readOnly: true
+ - name: empty
+ mountPath: /etc/deploy
+ env:
+ - name: PROJECT
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: IMAGE_PREFIX
+ value: ${IMAGE_PREFIX}
+ - name: IMAGE_VERSION
+ value: ${IMAGE_VERSION}
+ - name: MASTER_URL
+ value: ${MASTER_URL}
+ - name: MODE
+ value: ${MODE}
+ - name: CONTINUE_ON_ERROR
+ value: ${CONTINUE_ON_ERROR}
+ - name: REDEPLOY
+ value: ${REDEPLOY}
+ - name: IGNORE_PREFLIGHT
+ value: ${IGNORE_PREFLIGHT}
+ - name: USE_PERSISTENT_STORAGE
+ value: ${USE_PERSISTENT_STORAGE}
+ - name: DYNAMICALLY_PROVISION_STORAGE
+ value: ${DYNAMICALLY_PROVISION_STORAGE}
+ - name: HAWKULAR_METRICS_HOSTNAME
+ value: ${HAWKULAR_METRICS_HOSTNAME}
+ - name: CASSANDRA_NODES
+ value: ${CASSANDRA_NODES}
+ - name: CASSANDRA_PV_SIZE
+ value: ${CASSANDRA_PV_SIZE}
+ - name: METRIC_DURATION
+ value: ${METRIC_DURATION}
+ - name: USER_WRITE_ACCESS
+ value: ${USER_WRITE_ACCESS}
+ - name: HEAPSTER_NODE_ID
+ value: ${HEAPSTER_NODE_ID}
+ - name: METRIC_RESOLUTION
+ value: ${METRIC_RESOLUTION}
+ - name: STARTUP_TIMEOUT
+ value: ${STARTUP_TIMEOUT}
+ dnsPolicy: ClusterFirst
+ restartPolicy: Never
+ serviceAccount: metrics-deployer
+ volumes:
+ - name: empty
+ emptyDir: {}
+ - name: secret
+ secret:
+ secretName: metrics-deployer
+parameters:
+-
+ description: 'Specify prefix for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set prefix "openshift/origin-"'
+ name: IMAGE_PREFIX
+ value: "registry.access.redhat.com/openshift3/"
+-
+ description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set version "latest"'
+ name: IMAGE_VERSION
+ value: "3.4.0"
+-
+ description: "Internal URL for the master, for authentication retrieval"
+ name: MASTER_URL
+ value: "https://kubernetes.default.svc:443"
+-
+ description: "External hostname where clients will reach Hawkular Metrics"
+ name: HAWKULAR_METRICS_HOSTNAME
+ required: true
+-
+ description: "Can be set to: 'preflight' to perform validation before a deployment; 'deploy' to perform an initial deployment; 'refresh' to delete and redeploy all components but to keep persisted data and routes; 'redeploy' to delete and redeploy everything (losing all data in the process); 'validate' to re-run validations after a deployment"
+ name: MODE
+ value: "deploy"
+-
+ description: "Set to true to continue even if the deployer runs into an error."
+ name: CONTINUE_ON_ERROR
+ value: "false"
+-
+ description: "(Deprecated) Turns 'deploy' mode into 'redeploy' mode, deleting and redeploying everything (losing all data in the process)"
+ name: REDEPLOY
+ value: "false"
+-
+ description: "If preflight validation is blocking deployment and you're sure you don't care about it, this will ignore the results and proceed to deploy."
+ name: IGNORE_PREFLIGHT
+ value: "false"
+-
+ description: "Set to true for persistent storage, set to false to use non persistent storage"
+ name: USE_PERSISTENT_STORAGE
+ value: "true"
+-
+ description: "Set to true to dynamically provision storage, set to false to use pre-created persistent volumes"
+ name: DYNAMICALLY_PROVISION_STORAGE
+ value: "false"
+-
+ description: "The number of Cassandra Nodes to deploy for the initial cluster"
+ name: CASSANDRA_NODES
+ value: "1"
+-
+ description: "The persistent volume size for each of the Cassandra nodes"
+ name: CASSANDRA_PV_SIZE
+ value: "10Gi"
+-
+ description: "How many days metrics should be stored for."
+ name: METRIC_DURATION
+ value: "7"
+-
+ description: "Whether user accounts should be allowed to write metrics."
+ name: USER_WRITE_ACCESS
+ value: "false"
+-
+ description: "The identifier used when generating metric ids in Hawkular"
+ name: HEAPSTER_NODE_ID
+ value: "nodename"
+-
+ description: "How often metrics should be gathered. Default value of '15s' for 15 seconds"
+ name: METRIC_RESOLUTION
+ value: "15s"
+-
+ description: "How long in seconds to wait for Hawkular Metrics and Heapster to start up before attempting a restart"
+ name: STARTUP_TIMEOUT
+ value: "500"
diff --git a/roles/openshift_hosted_templates/files/v1.5/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.5/enterprise/registry-console.yaml
new file mode 100644
index 000000000..11478263c
--- /dev/null
+++ b/roles/openshift_hosted_templates/files/v1.5/enterprise/registry-console.yaml
@@ -0,0 +1,124 @@
+kind: Template
+apiVersion: v1
+metadata:
+ name: "registry-console"
+ annotations:
+ description: "Template for deploying registry web console. Requires cluster-admin."
+ tags: infrastructure
+labels:
+ createdBy: "registry-console-template"
+objects:
+ - kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: "registry-console"
+ labels:
+ name: "registry-console"
+ spec:
+ triggers:
+ - type: ConfigChange
+ replicas: 1
+ selector:
+ name: "registry-console"
+ template:
+ metadata:
+ labels:
+ name: "registry-console"
+ spec:
+ containers:
+ - name: registry-console
+ image: ${IMAGE_PREFIX}registry-console:${IMAGE_VERSION}
+ ports:
+ - containerPort: 9090
+ protocol: TCP
+ livenessProbe:
+ failureThreshold: 3
+ httpGet:
+ path: /ping
+ port: 9090
+ scheme: HTTP
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 5
+ readinessProbe:
+ failureThreshold: 3
+ httpGet:
+ path: /ping
+ port: 9090
+ scheme: HTTP
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 5
+ env:
+ - name: OPENSHIFT_OAUTH_PROVIDER_URL
+ value: "${OPENSHIFT_OAUTH_PROVIDER_URL}"
+ - name: OPENSHIFT_OAUTH_CLIENT_ID
+ value: "${OPENSHIFT_OAUTH_CLIENT_ID}"
+ - name: KUBERNETES_INSECURE
+ value: "false"
+ - name: COCKPIT_KUBE_INSECURE
+ value: "false"
+ - name: REGISTRY_ONLY
+ value: "true"
+ - name: REGISTRY_HOST
+ value: "${REGISTRY_HOST}"
+ - kind: Service
+ apiVersion: v1
+ metadata:
+ name: "registry-console"
+ labels:
+ name: "registry-console"
+ spec:
+ type: ClusterIP
+ ports:
+ - name: registry-console
+ protocol: TCP
+ port: 9000
+ targetPort: 9090
+ selector:
+ name: "registry-console"
+ - kind: ImageStream
+ apiVersion: v1
+ metadata:
+ name: registry-console
+ annotations:
+ description: Atomic Registry console
+ spec:
+ tags:
+ - annotations: null
+ from:
+ kind: DockerImage
+ name: ${IMAGE_PREFIX}registry-console
+ name: ${IMAGE_VERSION}
+ - kind: OAuthClient
+ apiVersion: v1
+ metadata:
+ name: "${OPENSHIFT_OAUTH_CLIENT_ID}"
+ respondWithChallenges: false
+ secret: "${OPENSHIFT_OAUTH_CLIENT_SECRET}"
+ redirectURIs:
+ - "${COCKPIT_KUBE_URL}"
+parameters:
+ - description: 'Specify "registry/repository" prefix for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", set prefix "registry.access.redhat.com/openshift3/"'
+ name: IMAGE_PREFIX
+ value: "registry.access.redhat.com/openshift3/"
+ - description: 'Specify image version; e.g. for "registry.access.redhat.com/openshift3/registry-console:3.3", set version "3.3"'
+ name: IMAGE_VERSION
+ value: "3.3"
+ - description: "The public URL for the Openshift OAuth Provider, e.g. https://openshift.example.com:8443"
+ name: OPENSHIFT_OAUTH_PROVIDER_URL
+ required: true
+ - description: "The registry console URL. This should be created beforehand using 'oc create route passthrough --service registry-console --port registry-console -n default', e.g. https://registry-console-default.example.com"
+ name: COCKPIT_KUBE_URL
+ required: true
+ - description: "Oauth client secret"
+ name: OPENSHIFT_OAUTH_CLIENT_SECRET
+ from: "user[a-zA-Z0-9]{64}"
+ generate: expression
+ - description: "Oauth client id"
+ name: OPENSHIFT_OAUTH_CLIENT_ID
+ value: "cockpit-oauth-client"
+ - description: "The integrated registry hostname exposed via route, e.g. registry.example.com"
+ name: REGISTRY_HOST
+ required: true
diff --git a/roles/openshift_hosted_templates/files/v1.5/origin/logging-deployer.yaml b/roles/openshift_hosted_templates/files/v1.5/origin/logging-deployer.yaml
new file mode 100644
index 000000000..bc8c79ca1
--- /dev/null
+++ b/roles/openshift_hosted_templates/files/v1.5/origin/logging-deployer.yaml
@@ -0,0 +1,345 @@
+apiVersion: "v1"
+kind: "List"
+items:
+-
+ apiVersion: "v1"
+ kind: "Template"
+ metadata:
+ name: logging-deployer-account-template
+ annotations:
+ description: "Template for creating the deployer account and roles needed for the aggregated logging deployer. Create as cluster-admin."
+ tags: "infrastructure"
+ objects:
+ -
+ apiVersion: v1
+ kind: ServiceAccount
+ name: logging-deployer
+ metadata:
+ name: logging-deployer
+ labels:
+ logging-infra: deployer
+ provider: openshift
+ component: deployer
+ -
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: aggregated-logging-kibana
+ -
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: aggregated-logging-elasticsearch
+ -
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: aggregated-logging-fluentd
+ -
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: aggregated-logging-curator
+ - apiVersion: v1
+ kind: ClusterRole
+ metadata:
+ name: oauth-editor
+ rules:
+ - resources:
+ - oauthclients
+ verbs:
+ - create
+ - delete
+ - apiVersion: v1
+ kind: ClusterRole
+ metadata:
+ name: daemonset-admin
+ rules:
+ - resources:
+ - daemonsets
+ apiGroups:
+ - extensions
+ verbs:
+ - create
+ - get
+ - list
+ - watch
+ - delete
+ - update
+ - apiVersion: v1
+ kind: ClusterRole
+ metadata:
+ name: rolebinding-reader
+ rules:
+ - resources:
+ - clusterrolebindings
+ verbs:
+ - get
+ -
+ apiVersion: v1
+ kind: RoleBinding
+ metadata:
+ name: logging-deployer-edit-role
+ roleRef:
+ kind: ClusterRole
+ name: edit
+ subjects:
+ - kind: ServiceAccount
+ name: logging-deployer
+ -
+ apiVersion: v1
+ kind: RoleBinding
+ metadata:
+ name: logging-deployer-dsadmin-role
+ roleRef:
+ kind: ClusterRole
+ name: daemonset-admin
+ subjects:
+ - kind: ServiceAccount
+ name: logging-deployer
+ -
+ apiVersion: v1
+ kind: RoleBinding
+ metadata:
+ name: logging-elasticsearch-view-role
+ roleRef:
+ kind: ClusterRole
+ name: view
+ subjects:
+ - kind: ServiceAccount
+ name: aggregated-logging-elasticsearch
+-
+ apiVersion: "v1"
+ kind: "Template"
+ metadata:
+ name: logging-deployer-template
+ annotations:
+ description: "Template for running the aggregated logging deployer in a pod. Requires empowered 'logging-deployer' service account."
+ tags: "infrastructure"
+ labels:
+ logging-infra: deployer
+ provider: openshift
+ objects:
+ -
+ apiVersion: v1
+ kind: Pod
+ metadata:
+ generateName: logging-deployer-
+ spec:
+ containers:
+ - image: ${IMAGE_PREFIX}logging-deployment:${IMAGE_VERSION}
+ imagePullPolicy: Always
+ name: deployer
+ volumeMounts:
+ - name: empty
+ mountPath: /etc/deploy
+ env:
+ - name: PROJECT
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: IMAGE_PREFIX
+ value: ${IMAGE_PREFIX}
+ - name: IMAGE_VERSION
+ value: ${IMAGE_VERSION}
+ - name: IMAGE_PULL_SECRET
+ value: ${IMAGE_PULL_SECRET}
+ - name: INSECURE_REGISTRY
+ value: ${INSECURE_REGISTRY}
+ - name: ENABLE_OPS_CLUSTER
+ value: ${ENABLE_OPS_CLUSTER}
+ - name: KIBANA_HOSTNAME
+ value: ${KIBANA_HOSTNAME}
+ - name: KIBANA_OPS_HOSTNAME
+ value: ${KIBANA_OPS_HOSTNAME}
+ - name: PUBLIC_MASTER_URL
+ value: ${PUBLIC_MASTER_URL}
+ - name: MASTER_URL
+ value: ${MASTER_URL}
+ - name: ES_INSTANCE_RAM
+ value: ${ES_INSTANCE_RAM}
+ - name: ES_PVC_SIZE
+ value: ${ES_PVC_SIZE}
+ - name: ES_PVC_PREFIX
+ value: ${ES_PVC_PREFIX}
+ - name: ES_PVC_DYNAMIC
+ value: ${ES_PVC_DYNAMIC}
+ - name: ES_CLUSTER_SIZE
+ value: ${ES_CLUSTER_SIZE}
+ - name: ES_NODE_QUORUM
+ value: ${ES_NODE_QUORUM}
+ - name: ES_RECOVER_AFTER_NODES
+ value: ${ES_RECOVER_AFTER_NODES}
+ - name: ES_RECOVER_EXPECTED_NODES
+ value: ${ES_RECOVER_EXPECTED_NODES}
+ - name: ES_RECOVER_AFTER_TIME
+ value: ${ES_RECOVER_AFTER_TIME}
+ - name: ES_OPS_INSTANCE_RAM
+ value: ${ES_OPS_INSTANCE_RAM}
+ - name: ES_OPS_PVC_SIZE
+ value: ${ES_OPS_PVC_SIZE}
+ - name: ES_OPS_PVC_PREFIX
+ value: ${ES_OPS_PVC_PREFIX}
+ - name: ES_OPS_PVC_DYNAMIC
+ value: ${ES_OPS_PVC_DYNAMIC}
+ - name: ES_OPS_CLUSTER_SIZE
+ value: ${ES_OPS_CLUSTER_SIZE}
+ - name: ES_OPS_NODE_QUORUM
+ value: ${ES_OPS_NODE_QUORUM}
+ - name: ES_OPS_RECOVER_AFTER_NODES
+ value: ${ES_OPS_RECOVER_AFTER_NODES}
+ - name: ES_OPS_RECOVER_EXPECTED_NODES
+ value: ${ES_OPS_RECOVER_EXPECTED_NODES}
+ - name: ES_OPS_RECOVER_AFTER_TIME
+ value: ${ES_OPS_RECOVER_AFTER_TIME}
+ - name: FLUENTD_NODESELECTOR
+ value: ${FLUENTD_NODESELECTOR}
+ - name: ES_NODESELECTOR
+ value: ${ES_NODESELECTOR}
+ - name: ES_OPS_NODESELECTOR
+ value: ${ES_OPS_NODESELECTOR}
+ - name: KIBANA_NODESELECTOR
+ value: ${KIBANA_NODESELECTOR}
+ - name: KIBANA_OPS_NODESELECTOR
+ value: ${KIBANA_OPS_NODESELECTOR}
+ - name: CURATOR_NODESELECTOR
+ value: ${CURATOR_NODESELECTOR}
+ - name: CURATOR_OPS_NODESELECTOR
+ value: ${CURATOR_OPS_NODESELECTOR}
+ - name: MODE
+ value: ${MODE}
+ dnsPolicy: ClusterFirst
+ restartPolicy: Never
+ serviceAccount: logging-deployer
+ volumes:
+ - name: empty
+ emptyDir: {}
+ parameters:
+ -
+ description: "The mode that the deployer runs in."
+ name: MODE
+ value: "install"
+ -
+ description: 'Specify prefix for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set prefix "openshift/origin-"'
+ name: IMAGE_PREFIX
+ value: "docker.io/openshift/origin-"
+ -
+ description: 'Specify version for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set version "v1.1"'
+ name: IMAGE_VERSION
+ value: "latest"
+ -
+ description: "(Deprecated) Specify the name of an existing pull secret to be used for pulling component images from an authenticated registry."
+ name: IMAGE_PULL_SECRET
+ -
+ description: "(Deprecated) Allow the registry for logging component images to be non-secure (not secured with a certificate signed by a known CA)"
+ name: INSECURE_REGISTRY
+ value: "false"
+ -
+ description: "(Deprecated) If true, set up to use a second ES cluster for ops logs."
+ name: ENABLE_OPS_CLUSTER
+ value: "false"
+ -
+ description: "(Deprecated) External hostname where clients will reach kibana"
+ name: KIBANA_HOSTNAME
+ value: "kibana.example.com"
+ -
+ description: "(Deprecated) External hostname at which admins will visit the ops Kibana."
+ name: KIBANA_OPS_HOSTNAME
+ value: kibana-ops.example.com
+ -
+ description: "(Deprecated) External URL for the master, for OAuth purposes"
+ name: PUBLIC_MASTER_URL
+ value: "https://localhost:8443"
+ -
+ description: "(Deprecated) Internal URL for the master, for authentication retrieval"
+ name: MASTER_URL
+ value: "https://kubernetes.default.svc.cluster.local"
+ -
+ description: "(Deprecated) How many instances of ElasticSearch to deploy."
+ name: ES_CLUSTER_SIZE
+ value: "1"
+ -
+ description: "(Deprecated) Amount of RAM to reserve per ElasticSearch instance."
+ name: ES_INSTANCE_RAM
+ value: "8G"
+ -
+ description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead."
+ name: ES_PVC_SIZE
+ -
+ description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_PVC_SIZE."
+ name: ES_PVC_PREFIX
+ value: "logging-es-"
+ -
+ description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES PVC. '
+ name: ES_PVC_DYNAMIC
+ -
+ description: "(Deprecated) Number of nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
+ name: ES_NODE_QUORUM
+ -
+ description: "(Deprecated) Number of nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_CLUSTER_SIZE."
+ name: ES_RECOVER_AFTER_NODES
+ -
+ description: "(Deprecated) Number of nodes desired to be present before the cluster will recover from a full restart. By default, ES_CLUSTER_SIZE."
+ name: ES_RECOVER_EXPECTED_NODES
+ -
+ description: "(Deprecated) Timeout for *expected* nodes to be present when cluster is recovering from a full restart."
+ name: ES_RECOVER_AFTER_TIME
+ value: "5m"
+ -
+ description: "(Deprecated) How many ops instances of ElasticSearch to deploy. By default, ES_CLUSTER_SIZE."
+ name: ES_OPS_CLUSTER_SIZE
+ -
+ description: "(Deprecated) Amount of RAM to reserve per ops ElasticSearch instance."
+ name: ES_OPS_INSTANCE_RAM
+ value: "8G"
+ -
+ description: "(Deprecated) Size of the PersistentVolumeClaim to create per ElasticSearch ops instance, e.g. 100G. If empty, no PVCs will be created and emptyDir volumes are used instead."
+ name: ES_OPS_PVC_SIZE
+ -
+ description: "(Deprecated) Prefix for the names of PersistentVolumeClaims to be created; a number will be appended per instance. If they don't already exist, they will be created with size ES_OPS_PVC_SIZE."
+ name: ES_OPS_PVC_PREFIX
+ value: "logging-es-ops-"
+ -
+ description: '(Deprecated) Set to "true" to request dynamic provisioning (if enabled for your cluster) of a PersistentVolume for the ES ops PVC. '
+ name: ES_OPS_PVC_DYNAMIC
+ -
+ description: "(Deprecated) Number of ops nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
+ name: ES_OPS_NODE_QUORUM
+ -
+ description: "(Deprecated) Number of ops nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_OPS_CLUSTER_SIZE."
+ name: ES_OPS_RECOVER_AFTER_NODES
+ -
+ description: "(Deprecated) Number of ops nodes desired to be present before the cluster will recover from a full restart. By default, ES_OPS_CLUSTER_SIZE."
+ name: ES_OPS_RECOVER_EXPECTED_NODES
+ -
+ description: "(Deprecated) Timeout for *expected* ops nodes to be present when cluster is recovering from a full restart."
+ name: ES_OPS_RECOVER_AFTER_TIME
+ value: "5m"
+ -
+ description: "(Deprecated) The nodeSelector used for the Fluentd DaemonSet."
+ name: FLUENTD_NODESELECTOR
+ value: "logging-infra-fluentd=true"
+ -
+ description: "(Deprecated) Node selector Elasticsearch cluster (label=value)."
+ name: ES_NODESELECTOR
+ value: ""
+ -
+ description: "(Deprecated) Node selector Elasticsearch operations cluster (label=value)."
+ name: ES_OPS_NODESELECTOR
+ value: ""
+ -
+ description: "(Deprecated) Node selector Kibana cluster (label=value)."
+ name: KIBANA_NODESELECTOR
+ value: ""
+ -
+ description: "(Deprecated) Node selector Kibana operations cluster (label=value)."
+ name: KIBANA_OPS_NODESELECTOR
+ value: ""
+ -
+ description: "(Deprecated) Node selector Curator (label=value)."
+ name: CURATOR_NODESELECTOR
+ value: ""
+ -
+ description: "(Deprecated) Node selector operations Curator (label=value)."
+ name: CURATOR_OPS_NODESELECTOR
+ value: ""
diff --git a/roles/openshift_hosted_templates/files/v1.5/origin/metrics-deployer.yaml b/roles/openshift_hosted_templates/files/v1.5/origin/metrics-deployer.yaml
new file mode 100644
index 000000000..54691572a
--- /dev/null
+++ b/roles/openshift_hosted_templates/files/v1.5/origin/metrics-deployer.yaml
@@ -0,0 +1,168 @@
+#!/bin/bash
+#
+# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+apiVersion: "v1"
+kind: "Template"
+metadata:
+ name: metrics-deployer-template
+ annotations:
+ description: "Template for deploying the required Metrics integration. Requires cluster-admin 'metrics-deployer' service account and 'metrics-deployer' secret."
+ tags: "infrastructure"
+labels:
+ metrics-infra: deployer
+ provider: openshift
+ component: deployer
+objects:
+-
+ apiVersion: v1
+ kind: Pod
+ metadata:
+ generateName: metrics-deployer-
+ spec:
+ securityContext: {}
+ containers:
+ - image: ${IMAGE_PREFIX}metrics-deployer:${IMAGE_VERSION}
+ name: deployer
+ securityContext: {}
+ volumeMounts:
+ - name: secret
+ mountPath: /secret
+ readOnly: true
+ - name: empty
+ mountPath: /etc/deploy
+ env:
+ - name: PROJECT
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: IMAGE_PREFIX
+ value: ${IMAGE_PREFIX}
+ - name: IMAGE_VERSION
+ value: ${IMAGE_VERSION}
+ - name: MASTER_URL
+ value: ${MASTER_URL}
+ - name: MODE
+ value: ${MODE}
+ - name: CONTINUE_ON_ERROR
+ value: ${CONTINUE_ON_ERROR}
+ - name: REDEPLOY
+ value: ${REDEPLOY}
+ - name: IGNORE_PREFLIGHT
+ value: ${IGNORE_PREFLIGHT}
+ - name: USE_PERSISTENT_STORAGE
+ value: ${USE_PERSISTENT_STORAGE}
+ - name: DYNAMICALLY_PROVISION_STORAGE
+ value: ${DYNAMICALLY_PROVISION_STORAGE}
+ - name: HAWKULAR_METRICS_HOSTNAME
+ value: ${HAWKULAR_METRICS_HOSTNAME}
+ - name: CASSANDRA_NODES
+ value: ${CASSANDRA_NODES}
+ - name: CASSANDRA_PV_SIZE
+ value: ${CASSANDRA_PV_SIZE}
+ - name: METRIC_DURATION
+ value: ${METRIC_DURATION}
+ - name: USER_WRITE_ACCESS
+ value: ${USER_WRITE_ACCESS}
+ - name: HEAPSTER_NODE_ID
+ value: ${HEAPSTER_NODE_ID}
+ - name: METRIC_RESOLUTION
+ value: ${METRIC_RESOLUTION}
+ - name: STARTUP_TIMEOUT
+ value: ${STARTUP_TIMEOUT}
+ dnsPolicy: ClusterFirst
+ restartPolicy: Never
+ serviceAccount: metrics-deployer
+ volumes:
+ - name: empty
+ emptyDir: {}
+ - name: secret
+ secret:
+ secretName: metrics-deployer
+parameters:
+-
+ description: 'Specify prefix for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set prefix "openshift/origin-"'
+ name: IMAGE_PREFIX
+ value: "openshift/origin-"
+-
+ description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:latest", set version "latest"'
+ name: IMAGE_VERSION
+ value: "latest"
+-
+ description: "Internal URL for the master, for authentication retrieval"
+ name: MASTER_URL
+ value: "https://kubernetes.default.svc:443"
+-
+ description: "External hostname where clients will reach Hawkular Metrics"
+ name: HAWKULAR_METRICS_HOSTNAME
+ required: true
+-
+ description: "Can be set to: 'preflight' to perform validation before a deployment; 'deploy' to perform an initial deployment; 'refresh' to delete and redeploy all components but to keep persisted data and routes; 'redeploy' to delete and redeploy everything (losing all data in the process); 'validate' to re-run validations after a deployment"
+ name: MODE
+ value: "deploy"
+-
+ description: "Set to true to continue even if the deployer runs into an error."
+ name: CONTINUE_ON_ERROR
+ value: "false"
+-
+ description: "(Deprecated) Turns 'deploy' mode into 'redeploy' mode, deleting and redeploying everything (losing all data in the process)"
+ name: REDEPLOY
+ value: "false"
+-
+ description: "If preflight validation is blocking deployment and you're sure you don't care about it, this will ignore the results and proceed to deploy."
+ name: IGNORE_PREFLIGHT
+ value: "false"
+-
+ description: "Set to true for persistent storage, set to false to use non persistent storage"
+ name: USE_PERSISTENT_STORAGE
+ value: "true"
+-
+ description: "Set to true to dynamically provision storage, set to false to use pre-created persistent volumes"
+ name: DYNAMICALLY_PROVISION_STORAGE
+ value: "false"
+-
+ description: "The number of Cassandra Nodes to deploy for the initial cluster"
+ name: CASSANDRA_NODES
+ value: "1"
+-
+ description: "The persistent volume size for each of the Cassandra nodes"
+ name: CASSANDRA_PV_SIZE
+ value: "10Gi"
+-
+ description: "How many days metrics should be stored for."
+ name: METRIC_DURATION
+ value: "7"
+-
+ description: "Whether user accounts should be allowed to write metrics."
+ name: USER_WRITE_ACCESS
+ value: "false"
+-
+ description: "The identifier used when generating metric ids in Hawkular"
+ name: HEAPSTER_NODE_ID
+ value: "nodename"
+-
+ description: "How often metrics should be gathered. Default value of '15s' for 15 seconds"
+ name: METRIC_RESOLUTION
+ value: "15s"
+-
+ description: "How long in seconds to wait for Hawkular Metrics and Heapster to start up before attempting a restart"
+ name: STARTUP_TIMEOUT
+ value: "500"
diff --git a/roles/openshift_hosted_templates/files/v1.5/origin/registry-console.yaml b/roles/openshift_hosted_templates/files/v1.5/origin/registry-console.yaml
new file mode 100644
index 000000000..80cc4233b
--- /dev/null
+++ b/roles/openshift_hosted_templates/files/v1.5/origin/registry-console.yaml
@@ -0,0 +1,124 @@
+kind: Template
+apiVersion: v1
+metadata:
+ name: "registry-console"
+ annotations:
+ description: "Template for deploying registry web console. Requires cluster-admin."
+ tags: infrastructure
+labels:
+ createdBy: "registry-console-template"
+objects:
+ - kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: "registry-console"
+ labels:
+ name: "registry-console"
+ spec:
+ triggers:
+ - type: ConfigChange
+ replicas: 1
+ selector:
+ name: "registry-console"
+ template:
+ metadata:
+ labels:
+ name: "registry-console"
+ spec:
+ containers:
+ - name: registry-console
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ ports:
+ - containerPort: 9090
+ protocol: TCP
+ livenessProbe:
+ failureThreshold: 3
+ httpGet:
+ path: /ping
+ port: 9090
+ scheme: HTTP
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 5
+ readinessProbe:
+ failureThreshold: 3
+ httpGet:
+ path: /ping
+ port: 9090
+ scheme: HTTP
+ periodSeconds: 10
+ successThreshold: 1
+ timeoutSeconds: 5
+ env:
+ - name: OPENSHIFT_OAUTH_PROVIDER_URL
+ value: "${OPENSHIFT_OAUTH_PROVIDER_URL}"
+ - name: OPENSHIFT_OAUTH_CLIENT_ID
+ value: "${OPENSHIFT_OAUTH_CLIENT_ID}"
+ - name: KUBERNETES_INSECURE
+ value: "false"
+ - name: COCKPIT_KUBE_INSECURE
+ value: "false"
+ - name: REGISTRY_ONLY
+ value: "true"
+ - name: REGISTRY_HOST
+ value: "${REGISTRY_HOST}"
+ - kind: Service
+ apiVersion: v1
+ metadata:
+ name: "registry-console"
+ labels:
+ name: "registry-console"
+ spec:
+ type: ClusterIP
+ ports:
+ - name: registry-console
+ protocol: TCP
+ port: 9000
+ targetPort: 9090
+ selector:
+ name: "registry-console"
+ - kind: ImageStream
+ apiVersion: v1
+ metadata:
+ name: registry-console
+ annotations:
+ description: Atomic Registry console
+ spec:
+ tags:
+ - annotations: null
+ from:
+ kind: DockerImage
+ name: ${IMAGE_NAME}
+ name: ${IMAGE_VERSION}
+ - kind: OAuthClient
+ apiVersion: v1
+ metadata:
+ name: "${OPENSHIFT_OAUTH_CLIENT_ID}"
+ respondWithChallenges: false
+ secret: "${OPENSHIFT_OAUTH_CLIENT_SECRET}"
+ redirectURIs:
+ - "${COCKPIT_KUBE_URL}"
+parameters:
+ - description: "Container image name"
+ name: IMAGE_NAME
+ value: "cockpit/kubernetes"
+ - description: 'Specify image version; e.g. for "cockpit/kubernetes:latest", set version "latest"'
+ name: IMAGE_VERSION
+ value: latest
+ - description: "The public URL for the Openshift OAuth Provider, e.g. https://openshift.example.com:8443"
+ name: OPENSHIFT_OAUTH_PROVIDER_URL
+ required: true
+ - description: "The registry console URL. This should be created beforehand using 'oc create route passthrough --service registry-console --port registry-console -n default', e.g. https://registry-console-default.example.com"
+ name: COCKPIT_KUBE_URL
+ required: true
+ - description: "Oauth client secret"
+ name: OPENSHIFT_OAUTH_CLIENT_SECRET
+ from: "user[a-zA-Z0-9]{64}"
+ generate: expression
+ - description: "Oauth client id"
+ name: OPENSHIFT_OAUTH_CLIENT_ID
+ value: "cockpit-oauth-client"
+ - description: "The integrated registry hostname exposed via route, e.g. registry.example.com"
+ name: REGISTRY_HOST
+ required: true
diff --git a/roles/openshift_loadbalancer/defaults/main.yml b/roles/openshift_loadbalancer/defaults/main.yml
index d096019af..6190383b6 100644
--- a/roles/openshift_loadbalancer/defaults/main.yml
+++ b/roles/openshift_loadbalancer/defaults/main.yml
@@ -2,7 +2,7 @@
haproxy_frontends:
- name: main
binds:
- - "*:8443"
+ - "*:{{ openshift_master_api_port | default(8443) }}"
default_backend: default
haproxy_backends:
diff --git a/roles/openshift_loadbalancer/tasks/main.yml b/roles/openshift_loadbalancer/tasks/main.yml
index 400f80715..e9bc8b4ab 100644
--- a/roles/openshift_loadbalancer/tasks/main.yml
+++ b/roles/openshift_loadbalancer/tasks/main.yml
@@ -1,14 +1,31 @@
---
-- fail: msg="Cannot use containerized=true for load balancer hosts."
- when: openshift.common.is_containerized | bool
-
- name: Install haproxy
package: name=haproxy state=present
+ when: not openshift.common.is_containerized | bool
+
+- name: Pull haproxy image
+ command: >
+ docker pull {{ openshift.common.router_image }}:{{ openshift_image_tag }}
+ when: openshift.common.is_containerized | bool
+
+- name: Create config directory for haproxy
+ file:
+ path: /etc/haproxy
+ state: directory
+ when: openshift.common.is_containerized | bool
+
+- name: Create the systemd unit files
+ template:
+ src: "haproxy.docker.service.j2"
+ dest: "{{ containerized_svc_dir }}/haproxy.service"
+ when: openshift.common.is_containerized | bool
+ notify: restart haproxy
- name: Configure systemd service directory for haproxy
file:
path: /etc/systemd/system/haproxy.service.d
state: directory
+ when: not openshift.common.is_containerized | bool
# Work around ini_file create option in 2.2 which defaults to no
- name: Create limits.conf file
@@ -19,6 +36,7 @@
owner: root
group: root
changed_when: false
+ when: not openshift.common.is_containerized | bool
- name: Configure the nofile limits for haproxy
ini_file:
@@ -27,6 +45,7 @@
option: LimitNOFILE
value: "{{ openshift_loadbalancer_limit_nofile | default(100000) }}"
notify: restart haproxy
+ when: not openshift.common.is_containerized | bool
- name: Configure haproxy
template:
diff --git a/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2 b/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2
new file mode 100644
index 000000000..624876ab0
--- /dev/null
+++ b/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2
@@ -0,0 +1,17 @@
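+# Runs haproxy in a container using the configured router image; the haproxy
+# configuration is bind-mounted read-only from /etc/haproxy on the host.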
+[Unit]
+After=docker.service
+Requires=docker.service
+PartOf=docker.service
+
+[Service]
+ExecStartPre=-/usr/bin/docker rm -f openshift_loadbalancer
+ExecStart=/usr/bin/docker run --rm --name openshift_loadbalancer -p {{ openshift_master_api_port | default(8443) }}:{{ openshift_master_api_port | default(8443) }} -v /etc/haproxy/haproxy.cfg:/etc/haproxy/haproxy.cfg:ro --entrypoint="haproxy -f /etc/haproxy/haproxy.cfg" {{ openshift.common.router_image }}:{{ openshift_image_tag }}
+ExecStartPost=/usr/bin/sleep 10
+ExecStop=/usr/bin/docker stop openshift_loadbalancer
+LimitNOFILE={{ openshift_loadbalancer_limit_nofile | default(100000) }}
+LimitCORE=infinity
+Restart=always
+RestartSec=5s
+
+[Install]
+WantedBy=docker.service
diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md
new file mode 100644
index 000000000..9b71dc676
--- /dev/null
+++ b/roles/openshift_logging/README.md
@@ -0,0 +1,91 @@
+## openshift_logging Role
+
+### Please note this role is still a work in progress
+
+This role is used for installing the Aggregated Logging stack. It should be run against
+a single host; it will create any missing certificates and API objects that the current
+[logging deployer](https://github.com/openshift/origin-aggregated-logging/tree/master/deployer) does.
+
+This role requires Java on the control host it is run on, since keystore generation for
+Elasticsearch uses JKS, as well as openssl to sign certificates.
+
+As part of the installation, it is recommended that you add the Fluentd node selector label
+to the list of persisted [node labels](https://docs.openshift.org/latest/install_config/install/advanced_install.html#configuring-node-host-labels).
+
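+For example (illustrative only, using the default Fluentd node selector listed below), the label can
+be persisted for a node by including it in that host's `openshift_node_labels` inventory variable:
+
+```yaml
+# host_vars entry for a node that should run Fluentd
+openshift_node_labels:
+  logging-infra-fluentd: 'true'
+```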
+### Required vars:
+
+- `openshift_logging_install_logging`: When `True` the `openshift_logging` role will install Aggregated Logging.
+- `openshift_logging_upgrade_logging`: When `True` the `openshift_logging` role will upgrade Aggregated Logging.
+
+When both `openshift_logging_install_logging` and `openshift_logging_upgrade_logging` are `False` the `openshift_logging` role will uninstall Aggregated Logging.
+
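+A minimal sketch of supplying these flags (the playbook layout and host group are illustrative,
+not prescriptive):
+
+```yaml
+- name: Install aggregated logging
+  hosts: oo_first_master
+  roles:
+  - role: openshift_logging
+    openshift_logging_install_logging: True
+```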
+### Optional vars:
+
+- `openshift_logging_image_prefix`: The prefix for the logging images to use. Defaults to 'docker.io/openshift/origin-'.
+- `openshift_logging_image_version`: The image version for the logging images to use. Defaults to 'latest'.
+- `openshift_logging_use_ops`: If 'True', set up a second ES and Kibana cluster for infrastructure logs. Defaults to 'False'.
+- `master_url`: The URL for the Kubernetes master; this does not need to be public facing but should be accessible from within the cluster. Defaults to 'https://kubernetes.default.svc.cluster.local'.
+- `public_master_url`: The public-facing URL for the Kubernetes master; this is used for authentication redirection. Defaults to 'https://localhost:8443'.
+- `openshift_logging_namespace`: The namespace that Aggregated Logging will be installed in. Defaults to 'logging'.
+- `openshift_logging_curator_default_days`: The default minimum age (in days) Curator uses for deleting log records. Defaults to '30'.
+- `openshift_logging_curator_run_hour`: The hour of the day that Curator will run at. Defaults to '0'.
+- `openshift_logging_curator_run_minute`: The minute of the hour that Curator will run at. Defaults to '0'.
+- `openshift_logging_curator_run_timezone`: The timezone that Curator uses for figuring out its run time. Defaults to 'UTC'.
+- `openshift_logging_curator_script_log_level`: The script log level for Curator. Defaults to 'INFO'.
+- `openshift_logging_curator_log_level`: The log level for the Curator process. Defaults to 'ERROR'.
+- `openshift_logging_curator_cpu_limit`: The amount of CPU to allocate to Curator. Default is '100m'.
+- `openshift_logging_curator_memory_limit`: The amount of memory to allocate to Curator. Unset if not specified.
+
+- `openshift_logging_kibana_hostname`: The Kibana hostname. Defaults to 'kibana.example.com'.
+- `openshift_logging_kibana_cpu_limit`: The amount of CPU to allocate to Kibana or unset if not specified.
+- `openshift_logging_kibana_memory_limit`: The amount of memory to allocate to Kibana or unset if not specified.
+- `openshift_logging_kibana_proxy_debug`: When "True", set the Kibana Proxy log level to DEBUG. Defaults to 'false'.
+- `openshift_logging_kibana_proxy_cpu_limit`: The amount of CPU to allocate to Kibana proxy or unset if not specified.
+- `openshift_logging_kibana_proxy_memory_limit`: The amount of memory to allocate to Kibana proxy or unset if not specified.
+- `openshift_logging_kibana_replica_count`: The number of replicas Kibana should be scaled up to. Defaults to 1.
+
+- `openshift_logging_fluentd_nodeselector`: The node selector that the Fluentd daemonset uses to determine where to deploy to. Defaults to '"logging-infra-fluentd": "true"'.
+- `openshift_logging_fluentd_cpu_limit`: The CPU limit for Fluentd pods. Defaults to '100m'.
+- `openshift_logging_fluentd_memory_limit`: The memory limit for Fluentd pods. Defaults to '512Mi'.
+- `openshift_logging_fluentd_es_copy`: Whether or not to use the ES_COPY feature for Fluentd (DEPRECATED). Defaults to 'False'.
+- `openshift_logging_fluentd_use_journal`: Whether or not Fluentd should read log entries from Journal. Defaults to 'False'. NOTE: Fluentd will attempt to detect whether or not Docker is using the journald log driver and may overwrite this value.
+- `openshift_logging_fluentd_journal_read_from_head`: Whether or not Fluentd will try to read from the head of Journal when first starting up, using this may cause a delay in ES receiving current log records. Defaults to 'False'.
+- `openshift_logging_fluentd_hosts`: List of nodes that should be labeled for Fluentd to be deployed to. Defaults to ['--all'].
+
+- `openshift_logging_es_host`: The name of the ES service Fluentd should send logs to. Defaults to 'logging-es'.
+- `openshift_logging_es_port`: The port for the ES service Fluentd should send its logs to. Defaults to '9200'.
+- `openshift_logging_es_ca`: The location of the ca Fluentd uses to communicate with its openshift_logging_es_host. Defaults to '/etc/fluent/keys/ca'.
+- `openshift_logging_es_client_cert`: The location of the client certificate Fluentd uses for openshift_logging_es_host. Defaults to '/etc/fluent/keys/cert'.
+- `openshift_logging_es_client_key`: The location of the client key Fluentd uses for openshift_logging_es_host. Defaults to '/etc/fluent/keys/key'.
+
+- `openshift_logging_es_cluster_size`: The number of ES cluster members. Defaults to '1'.
+- `openshift_logging_es_cpu_limit`: The CPU limit for the ES cluster. Unset if not specified.
+- `openshift_logging_es_memory_limit`: The amount of RAM that should be assigned to ES. Defaults to '1024Mi'.
+- `openshift_logging_es_pv_selector`: A key/value map added to a PVC in order to select specific PVs. Defaults to 'None'.
+- `openshift_logging_es_pvc_dynamic`: Whether or not to add the dynamic PVC annotation for any generated PVCs. Defaults to 'False'.
+- `openshift_logging_es_pvc_size`: The requested size for the ES PVCs; when not provided, the role will not generate any PVCs. Defaults to '""'.
+- `openshift_logging_es_pvc_prefix`: The prefix for the generated PVCs. Defaults to 'logging-es'.
+- `openshift_logging_es_recover_after_time`: The amount of time ES will wait before it tries to recover. Defaults to '5m'.
+- `openshift_logging_es_storage_group`: The storage group used for ES. Defaults to '65534'.
+
+When `openshift_logging_use_ops` is `True`, there are some additional variables. They work the
+same as their non-ops counterparts above, but apply to the OPS cluster instance:
+- `openshift_logging_es_ops_host`: logging-es-ops
+- `openshift_logging_es_ops_port`: 9200
+- `openshift_logging_es_ops_ca`: /etc/fluent/keys/ca
+- `openshift_logging_es_ops_client_cert`: /etc/fluent/keys/cert
+- `openshift_logging_es_ops_client_key`: /etc/fluent/keys/key
+- `openshift_logging_es_ops_cluster_size`: 1
+- `openshift_logging_es_ops_cpu_limit`: The CPU limit for the OPS ES cluster. Unset if not specified.
+- `openshift_logging_es_ops_memory_limit`: 1024Mi
+- `openshift_logging_es_ops_pvc_dynamic`: False
+- `openshift_logging_es_ops_pvc_size`: ""
+- `openshift_logging_es_ops_pvc_prefix`: logging-es-ops
+- `openshift_logging_es_ops_recover_after_time`: 5m
+- `openshift_logging_es_ops_storage_group`: 65534
+- `openshift_logging_kibana_ops_hostname`: The Operations Kibana hostname. Defaults to 'kibana-ops.example.com'.
+- `openshift_logging_kibana_ops_cpu_limit`: The amount of CPU to allocate to the Operations Kibana. Unset if not specified.
+- `openshift_logging_kibana_ops_memory_limit`: The amount of memory to allocate to the Operations Kibana. Unset if not specified.
+- `openshift_logging_kibana_ops_proxy_cpu_limit`: The amount of CPU to allocate to the Operations Kibana proxy. Unset if not specified.
+- `openshift_logging_kibana_ops_proxy_memory_limit`: The amount of memory to allocate to the Operations Kibana proxy. Unset if not specified.
+- `openshift_logging_kibana_ops_replica_count`: The number of replicas Kibana ops should be scaled up to. Defaults to 1.
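+
+As an illustration only (the hostnames and sizes below are hypothetical, not role defaults),
+the variables above might be set in an Ansible inventory or vars file like this:
+
+```yaml
+openshift_logging_install_logging: True
+openshift_logging_use_ops: True
+openshift_logging_namespace: logging
+openshift_logging_kibana_hostname: kibana.apps.example.com
+openshift_logging_kibana_ops_hostname: kibana-ops.apps.example.com
+openshift_logging_es_cluster_size: 3
+openshift_logging_es_memory_limit: 8Gi
+openshift_logging_es_pvc_size: 100Gi
+openshift_logging_es_pvc_dynamic: True
+openshift_logging_fluentd_nodeselector: {'logging-infra-fluentd': 'true'}
+```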
diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml
new file mode 100644
index 000000000..919c53787
--- /dev/null
+++ b/roles/openshift_logging/defaults/main.yml
@@ -0,0 +1,85 @@
+---
+openshift_logging_image_prefix: docker.io/openshift/origin-
+openshift_logging_image_version: latest
+openshift_logging_use_ops: False
+master_url: "https://kubernetes.default.svc.{{ openshift.common.dns_domain }}"
+public_master_url: "https://{{openshift.common.public_hostname}}:8443"
+openshift_logging_namespace: logging
+openshift_logging_install_logging: True
+
+openshift_logging_curator_default_days: 30
+openshift_logging_curator_run_hour: 0
+openshift_logging_curator_run_minute: 0
+openshift_logging_curator_run_timezone: UTC
+openshift_logging_curator_script_log_level: INFO
+openshift_logging_curator_log_level: ERROR
+openshift_logging_curator_cpu_limit: 100m
+openshift_logging_curator_memory_limit: null
+
+openshift_logging_curator_ops_cpu_limit: 100m
+openshift_logging_curator_ops_memory_limit: null
+
+openshift_logging_kibana_hostname: "kibana.{{openshift.common.dns_domain}}"
+openshift_logging_kibana_cpu_limit: null
+openshift_logging_kibana_memory_limit: null
+openshift_logging_kibana_proxy_debug: false
+openshift_logging_kibana_proxy_cpu_limit: null
+openshift_logging_kibana_proxy_memory_limit: null
+openshift_logging_kibana_replica_count: 1
+
+openshift_logging_kibana_ops_hostname: "kibana-ops.{{openshift.common.dns_domain}}"
+openshift_logging_kibana_ops_cpu_limit: null
+openshift_logging_kibana_ops_memory_limit: null
+openshift_logging_kibana_ops_proxy_debug: false
+openshift_logging_kibana_ops_proxy_cpu_limit: null
+openshift_logging_kibana_ops_proxy_memory_limit: null
+openshift_logging_kibana_ops_replica_count: 1
+
+openshift_logging_fluentd_nodeselector: {'logging-infra-fluentd': 'true'}
+openshift_logging_fluentd_cpu_limit: 100m
+openshift_logging_fluentd_memory_limit: 512Mi
+openshift_logging_fluentd_es_copy: false
+openshift_logging_fluentd_use_journal: false
+openshift_logging_fluentd_journal_read_from_head: false
+openshift_logging_fluentd_hosts: ['--all']
+
+openshift_logging_es_host: logging-es
+openshift_logging_es_port: 9200
+openshift_logging_es_ca: /etc/fluent/keys/ca
+openshift_logging_es_client_cert: /etc/fluent/keys/cert
+openshift_logging_es_client_key: /etc/fluent/keys/key
+openshift_logging_es_cluster_size: 1
+openshift_logging_es_cpu_limit: null
+openshift_logging_es_memory_limit: 1024Mi
+openshift_logging_es_pv_selector: null
+openshift_logging_es_pvc_dynamic: False
+openshift_logging_es_pvc_size: ""
+openshift_logging_es_pvc_prefix: logging-es
+openshift_logging_es_recover_after_time: 5m
+openshift_logging_es_storage_group: 65534
+
+# allow cluster-admin or cluster-reader to view operations index
+openshift_logging_es_ops_allow_cluster_reader: False
+
+openshift_logging_es_ops_host: logging-es-ops
+openshift_logging_es_ops_port: 9200
+openshift_logging_es_ops_ca: /etc/fluent/keys/ca
+openshift_logging_es_ops_client_cert: /etc/fluent/keys/cert
+openshift_logging_es_ops_client_key: /etc/fluent/keys/key
+openshift_logging_es_ops_cluster_size: 1
+openshift_logging_es_ops_cpu_limit: null
+openshift_logging_es_ops_memory_limit: 1024Mi
+openshift_logging_es_ops_pv_selector: null
+openshift_logging_es_ops_pvc_dynamic: False
+openshift_logging_es_ops_pvc_size: ""
+openshift_logging_es_ops_pvc_prefix: logging-es-ops
+openshift_logging_es_ops_recover_after_time: 5m
+openshift_logging_es_ops_storage_group: 65534
+
+# The following can be uncommented to provide values for configmaps -- take care when providing file contents, as incorrect contents may prevent your cluster from operating correctly
+#es_logging_contents:
+#es_config_contents:
+#curator_config_contents:
+#fluentd_config_contents:
+#fluentd_throttle_contents:
+#fluentd_secureforward_contents:
diff --git a/roles/openshift_logging/files/curator.yml b/roles/openshift_logging/files/curator.yml
new file mode 100644
index 000000000..8d62d8e7d
--- /dev/null
+++ b/roles/openshift_logging/files/curator.yml
@@ -0,0 +1,18 @@
+# Logging example curator config file
+
+# uncomment and use this to override the defaults from env vars
+#.defaults:
+# delete:
+# days: 30
+# runhour: 0
+# runminute: 0
+
+# to keep ops logs for a different duration:
+#.operations:
+# delete:
+# weeks: 8
+
+# example for a normal project
+#myapp:
+# delete:
+# weeks: 1
diff --git a/roles/openshift_logging/files/elasticsearch-logging.yml b/roles/openshift_logging/files/elasticsearch-logging.yml
new file mode 100644
index 000000000..377abe21f
--- /dev/null
+++ b/roles/openshift_logging/files/elasticsearch-logging.yml
@@ -0,0 +1,72 @@
+# you can override this by setting a system property, for example -Des.logger.level=DEBUG
+es.logger.level: INFO
+rootLogger: ${es.logger.level}, console, file
+logger:
+ # log action execution errors for easier debugging
+ action: WARN
+ # reduce the logging for aws, too much is logged under the default INFO
+ com.amazonaws: WARN
+ io.fabric8.elasticsearch: ${PLUGIN_LOGLEVEL}
+ io.fabric8.kubernetes: ${PLUGIN_LOGLEVEL}
+
+ # gateway
+ #gateway: DEBUG
+ #index.gateway: DEBUG
+
+ # peer shard recovery
+ #indices.recovery: DEBUG
+
+ # discovery
+ #discovery: TRACE
+
+ index.search.slowlog: TRACE, index_search_slow_log_file
+ index.indexing.slowlog: TRACE, index_indexing_slow_log_file
+
+ # search-guard
+ com.floragunn.searchguard: WARN
+
+additivity:
+ index.search.slowlog: false
+ index.indexing.slowlog: false
+
+appender:
+ console:
+ type: console
+ layout:
+ type: consolePattern
+ conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
+
+ file:
+ type: dailyRollingFile
+ file: ${path.logs}/${cluster.name}.log
+ datePattern: "'.'yyyy-MM-dd"
+ layout:
+ type: pattern
+ conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
+
+ # Use the following log4j-extras RollingFileAppender to enable gzip compression of log files.
+ # For more information see https://logging.apache.org/log4j/extras/apidocs/org/apache/log4j/rolling/RollingFileAppender.html
+ #file:
+ #type: extrasRollingFile
+ #file: ${path.logs}/${cluster.name}.log
+ #rollingPolicy: timeBased
+ #rollingPolicy.FileNamePattern: ${path.logs}/${cluster.name}.log.%d{yyyy-MM-dd}.gz
+ #layout:
+ #type: pattern
+ #conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
+
+ index_search_slow_log_file:
+ type: dailyRollingFile
+ file: ${path.logs}/${cluster.name}_index_search_slowlog.log
+ datePattern: "'.'yyyy-MM-dd"
+ layout:
+ type: pattern
+ conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
+
+ index_indexing_slow_log_file:
+ type: dailyRollingFile
+ file: ${path.logs}/${cluster.name}_index_indexing_slowlog.log
+ datePattern: "'.'yyyy-MM-dd"
+ layout:
+ type: pattern
+ conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
diff --git a/roles/openshift_logging/files/es_migration.sh b/roles/openshift_logging/files/es_migration.sh
new file mode 100644
index 000000000..339b5a1b2
--- /dev/null
+++ b/roles/openshift_logging/files/es_migration.sh
@@ -0,0 +1,79 @@
+CA=${1:-/etc/openshift/logging/ca.crt}
+KEY=${2:-/etc/openshift/logging/system.admin.key}
+CERT=${3:-/etc/openshift/logging/system.admin.crt}
+openshift_logging_es_host=${4:-logging-es}
+openshift_logging_es_port=${5:-9200}
+namespace=${6:-logging}
+
+# for each index in _cat/indices
+# skip indices that begin with . - .kibana, .operations, etc.
+# skip indices that contain a uuid
+# get a list of unique project
+# daterx - the date regex that matches the .%Y.%m.%d at the end of the indices
+# we are interested in - the awk will strip that part off
+function get_list_of_indices() {
+ curl -s --cacert $CA --key $KEY --cert $CERT https://$openshift_logging_es_host:$openshift_logging_es_port/_cat/indices | \
+ awk -v daterx='[.]20[0-9]{2}[.][0-1]?[0-9][.][0-9]{1,2}$' \
+ '$3 !~ "^[.]" && $3 !~ "^[^.]+[.][^.]+"daterx && $3 !~ "^project." && $3 ~ daterx {print gensub(daterx, "", "", $3)}' | \
+ sort -u
+}
+
+# for each index in _cat/indices
+# skip indices that begin with . - .kibana, .operations, etc.
+# get a list of unique project.uuid
+# daterx - the date regex that matches the .%Y.%m.%d at the end of the indices
+# we are interested in - the awk will strip that part off
+function get_list_of_proj_uuid_indices() {
+ curl -s --cacert $CA --key $KEY --cert $CERT https://$openshift_logging_es_host:$openshift_logging_es_port/_cat/indices | \
+ awk -v daterx='[.]20[0-9]{2}[.][0-1]?[0-9][.][0-9]{1,2}$' \
+ '$3 !~ "^[.]" && $3 ~ "^[^.]+[.][^.]+"daterx && $3 !~ "^project." && $3 ~ daterx {print gensub(daterx, "", "", $3)}' | \
+ sort -u
+}
+
+if [[ -z "$(oc get pods -l component=es -o jsonpath='{.items[?(@.status.phase == "Running")].metadata.name}')" ]]; then
+ echo "No Elasticsearch pods found running. Cannot update common data model."
+ exit 1
+fi
+
+count=$(get_list_of_indices | wc -l)
+if [ $count -eq 0 ]; then
+ echo No matching indices found - skipping update_for_uuid
+else
+ echo Creating aliases for $count index patterns . . .
+ {
+ echo '{"actions":['
+ get_list_of_indices | \
+ while IFS=. read proj ; do
+ # e.g. make test.uuid.* an alias of test.* so we can search for
+ # /test.uuid.*/_search and get both the test.uuid.* and
+ # the test.* indices
+      uuid=$(oc get project "$proj" -o jsonpath='{.metadata.uid}' 2>/dev/null)
+      [ -n "$uuid" ] && echo "{\"add\":{\"index\":\"$proj.*\",\"alias\":\"$proj.$uuid.*\"}}"
+ done
+ echo ']}'
+ } | curl -s --cacert $CA --key $KEY --cert $CERT -XPOST -d @- "https://$openshift_logging_es_host:$openshift_logging_es_port/_aliases"
+fi
+
+count=$(get_list_of_proj_uuid_indices | wc -l)
+if [ $count -eq 0 ] ; then
+ echo No matching indexes found - skipping update_for_common_data_model
+ exit 0
+fi
+
+echo Creating aliases for $count index patterns . . .
+# for each index in _cat/indices
+# skip indices that begin with . - .kibana, .operations, etc.
+# get a list of unique project.uuid
+# daterx - the date regex that matches the .%Y.%m.%d at the end of the indices
+# we are interested in - the awk will strip that part off
+{
+ echo '{"actions":['
+ get_list_of_proj_uuid_indices | \
+ while IFS=. read proj uuid ; do
+      # e.g. make project.test.uuid.* an alias of test.uuid.* so we can search for
+ # /project.test.uuid.*/_search and get both the test.uuid.* and
+ # the project.test.uuid.* indices
+ echo "{\"add\":{\"index\":\"$proj.$uuid.*\",\"alias\":\"${PROJ_PREFIX}$proj.$uuid.*\"}}"
+ done
+ echo ']}'
+} | curl -s --cacert $CA --key $KEY --cert $CERT -XPOST -d @- "https://$openshift_logging_es_host:$openshift_logging_es_port/_aliases"
diff --git a/roles/openshift_logging/files/fluent.conf b/roles/openshift_logging/files/fluent.conf
new file mode 100644
index 000000000..aa843e983
--- /dev/null
+++ b/roles/openshift_logging/files/fluent.conf
@@ -0,0 +1,34 @@
+# This file is the fluentd configuration entrypoint. Edit with care.
+
+@include configs.d/openshift/system.conf
+
+# In each section below, pre- and post- includes don't include anything initially;
+# they exist to enable future additions to openshift conf as needed.
+
+## sources
+## ordered so that syslog always runs last...
+@include configs.d/openshift/input-pre-*.conf
+@include configs.d/dynamic/input-docker-*.conf
+@include configs.d/dynamic/input-syslog-*.conf
+@include configs.d/openshift/input-post-*.conf
+##
+
+<label @INGRESS>
+## filters
+ @include configs.d/openshift/filter-pre-*.conf
+ @include configs.d/openshift/filter-retag-journal.conf
+ @include configs.d/openshift/filter-k8s-meta.conf
+ @include configs.d/openshift/filter-kibana-transform.conf
+ @include configs.d/openshift/filter-k8s-flatten-hash.conf
+ @include configs.d/openshift/filter-k8s-record-transform.conf
+ @include configs.d/openshift/filter-syslog-record-transform.conf
+ @include configs.d/openshift/filter-post-*.conf
+##
+
+## matches
+ @include configs.d/openshift/output-pre-*.conf
+ @include configs.d/openshift/output-operations.conf
+ @include configs.d/openshift/output-applications.conf
+ # no post - applications.conf matches everything left
+##
+</label>
diff --git a/roles/openshift_logging/files/fluentd-throttle-config.yaml b/roles/openshift_logging/files/fluentd-throttle-config.yaml
new file mode 100644
index 000000000..375621ff1
--- /dev/null
+++ b/roles/openshift_logging/files/fluentd-throttle-config.yaml
@@ -0,0 +1,7 @@
+# Logging example fluentd throttling config file
+
+#example-project:
+# read_lines_limit: 10
+#
+#.operations:
+# read_lines_limit: 100
diff --git a/roles/openshift_logging/files/generate-jks.sh b/roles/openshift_logging/files/generate-jks.sh
new file mode 100644
index 000000000..9fe557f83
--- /dev/null
+++ b/roles/openshift_logging/files/generate-jks.sh
@@ -0,0 +1,178 @@
+#! /bin/sh
+set -ex
+
+function usage() {
+ echo Usage: `basename $0` cert_directory [logging_namespace] 1>&2
+}
+
+function generate_JKS_chain() {
+ dir=${SCRATCH_DIR:-_output}
+ ADD_OID=$1
+ NODE_NAME=$2
+ CERT_NAMES=${3:-$NODE_NAME}
+ ks_pass=${KS_PASS:-kspass}
+ ts_pass=${TS_PASS:-tspass}
+ rm -rf $NODE_NAME
+
+ extension_names=""
+ for name in ${CERT_NAMES//,/ }; do
+ extension_names="${extension_names},dns:${name}"
+ done
+
+ if [ "$ADD_OID" = true ]; then
+ extension_names="${extension_names},oid:1.2.3.4.5.5"
+ fi
+
+ echo Generating keystore and certificate for node $NODE_NAME
+
+ keytool -genkey \
+ -alias $NODE_NAME \
+ -keystore $dir/$NODE_NAME.jks \
+ -keypass $ks_pass \
+ -storepass $ks_pass \
+ -keyalg RSA \
+ -keysize 2048 \
+ -validity 712 \
+ -dname "CN=$NODE_NAME, OU=OpenShift, O=Logging" \
+ -ext san=dns:localhost,ip:127.0.0.1"${extension_names}"
+
+ echo Generating certificate signing request for node $NODE_NAME
+
+ keytool -certreq \
+ -alias $NODE_NAME \
+ -keystore $dir/$NODE_NAME.jks \
+ -storepass $ks_pass \
+ -file $dir/$NODE_NAME.csr \
+ -keyalg rsa \
+ -dname "CN=$NODE_NAME, OU=OpenShift, O=Logging" \
+ -ext san=dns:localhost,ip:127.0.0.1"${extension_names}"
+
+ echo Sign certificate request with CA
+
+ openssl ca \
+ -in $dir/$NODE_NAME.csr \
+ -notext \
+ -out $dir/$NODE_NAME.crt \
+ -config $dir/signing.conf \
+ -extensions v3_req \
+ -batch \
+ -extensions server_ext
+
+ echo "Import back to keystore (including CA chain)"
+
+ keytool \
+ -import \
+ -file $dir/ca.crt \
+ -keystore $dir/$NODE_NAME.jks \
+ -storepass $ks_pass \
+ -noprompt -alias sig-ca
+
+ keytool \
+ -import \
+ -file $dir/$NODE_NAME.crt \
+ -keystore $dir/$NODE_NAME.jks \
+ -storepass $ks_pass \
+ -noprompt \
+ -alias $NODE_NAME
+
+ echo All done for $NODE_NAME
+}
+
+function generate_JKS_client_cert() {
+ NODE_NAME="$1"
+ ks_pass=${KS_PASS:-kspass}
+ ts_pass=${TS_PASS:-tspass}
+ dir=${SCRATCH_DIR:-_output} # for writing files to bundle into secrets
+
+ echo Generating keystore and certificate for node ${NODE_NAME}
+
+ keytool -genkey \
+ -alias $NODE_NAME \
+ -keystore $dir/$NODE_NAME.jks \
+ -keyalg RSA \
+ -keysize 2048 \
+ -validity 712 \
+ -keypass $ks_pass \
+ -storepass $ks_pass \
+ -dname "CN=$NODE_NAME, OU=OpenShift, O=Logging"
+
+ echo Generating certificate signing request for node $NODE_NAME
+
+ keytool -certreq \
+ -alias $NODE_NAME \
+ -keystore $dir/$NODE_NAME.jks \
+ -file $dir/$NODE_NAME.jks.csr \
+ -keyalg rsa \
+ -keypass $ks_pass \
+ -storepass $ks_pass \
+ -dname "CN=$NODE_NAME, OU=OpenShift, O=Logging"
+
+ echo Sign certificate request with CA
+ openssl ca \
+ -in "$dir/$NODE_NAME.jks.csr" \
+ -notext \
+ -out "$dir/$NODE_NAME.jks.crt" \
+ -config $dir/signing.conf \
+ -extensions v3_req \
+ -batch \
+ -extensions server_ext
+
+ echo "Import back to keystore (including CA chain)"
+
+ keytool \
+ -import \
+ -file $dir/ca.crt \
+ -keystore $dir/$NODE_NAME.jks \
+ -storepass $ks_pass \
+ -noprompt -alias sig-ca
+
+ keytool \
+ -import \
+ -file $dir/$NODE_NAME.jks.crt \
+ -keystore $dir/$NODE_NAME.jks \
+ -storepass $ks_pass \
+ -noprompt \
+ -alias $NODE_NAME
+
+ echo All done for $NODE_NAME
+}
+
+function join { local IFS="$1"; shift; echo "$*"; }
+
+function createTruststore() {
+
+ echo "Import CA to truststore for validating client certs"
+
+ keytool \
+ -import \
+ -file $dir/ca.crt \
+ -keystore $dir/truststore.jks \
+ -storepass $ts_pass \
+ -noprompt -alias sig-ca
+}
+
+if [ $# -lt 1 ]; then
+ usage
+ exit 1
+fi
+
+dir=$1
+SCRATCH_DIR=$dir
+PROJECT=${2:-logging}
+
+if [[ ! -f $dir/system.admin.jks || -z "$(keytool -list -keystore $dir/system.admin.jks -storepass kspass | grep sig-ca)" ]]; then
+ generate_JKS_client_cert "system.admin"
+fi
+
+if [[ ! -f $dir/elasticsearch.jks || -z "$(keytool -list -keystore $dir/elasticsearch.jks -storepass kspass | grep sig-ca)" ]]; then
+ generate_JKS_chain true elasticsearch "$(join , logging-es{,-ops})"
+fi
+
+if [[ ! -f $dir/logging-es.jks || -z "$(keytool -list -keystore $dir/logging-es.jks -storepass kspass | grep sig-ca)" ]]; then
+ generate_JKS_chain false logging-es "$(join , logging-es{,-ops}{,-cluster}{,.${PROJECT}.svc.cluster.local})"
+fi
+
+[ ! -f $dir/truststore.jks ] && createTruststore
+
+# necessary so that the job knows it completed successfully
+exit 0
diff --git a/roles/openshift_logging/files/logging-deployer-sa.yaml b/roles/openshift_logging/files/logging-deployer-sa.yaml
new file mode 100644
index 000000000..334c9402b
--- /dev/null
+++ b/roles/openshift_logging/files/logging-deployer-sa.yaml
@@ -0,0 +1,6 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: logging-deployer
+secrets:
+- name: logging-deployer
diff --git a/roles/openshift_logging/files/secure-forward.conf b/roles/openshift_logging/files/secure-forward.conf
new file mode 100644
index 000000000..f4483df79
--- /dev/null
+++ b/roles/openshift_logging/files/secure-forward.conf
@@ -0,0 +1,24 @@
+# @type secure_forward
+
+# self_hostname ${HOSTNAME}
+# shared_key <SECRET_STRING>
+
+# secure yes
+# enable_strict_verification yes
+
+# ca_cert_path /etc/fluent/keys/your_ca_cert
+# ca_private_key_path /etc/fluent/keys/your_private_key
+ # for private CA secret key
+# ca_private_key_passphrase passphrase
+
+# <server>
+ # or IP
+# host server.fqdn.example.com
+# port 24284
+# </server>
+# <server>
+ # ip address to connect
+# host 203.0.113.8
+ # specify hostlabel for FQDN verification if ipaddress is used for host
+# hostlabel server.fqdn.example.com
+# </server>
diff --git a/roles/openshift_logging/files/server-tls.json b/roles/openshift_logging/files/server-tls.json
new file mode 100644
index 000000000..86deb23e3
--- /dev/null
+++ b/roles/openshift_logging/files/server-tls.json
@@ -0,0 +1,5 @@
+// See for available options: https://nodejs.org/api/tls.html#tls_tls_createserver_options_secureconnectionlistener
+tls_options = {
+ ciphers: 'kEECDH:+kEECDH+SHA:kEDH:+kEDH+SHA:+kEDH+CAMELLIA:kECDH:+kECDH+SHA:kRSA:+kRSA+SHA:+kRSA+CAMELLIA:!aNULL:!eNULL:!SSLv2:!RC4:!DES:!EXP:!SEED:!IDEA:+3DES',
+ honorCipherOrder: true
+}
diff --git a/roles/openshift_logging/filter_plugins/openshift_logging.py b/roles/openshift_logging/filter_plugins/openshift_logging.py
new file mode 100644
index 000000000..007be3ac0
--- /dev/null
+++ b/roles/openshift_logging/filter_plugins/openshift_logging.py
@@ -0,0 +1,38 @@
+'''
+ Openshift Logging class that provides useful filters used in Logging
+'''
+
+import random
+
+
+def random_word(source_alpha, length):
+ ''' Returns a random word given the source of characters to pick from and resulting length '''
+ return ''.join(random.choice(source_alpha) for i in range(length))
+
+
+def entry_from_named_pair(register_pairs, key):
+ ''' Returns the entry in key given results provided by register_pairs '''
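+    # Illustrative (hypothetical) input shape, e.g. a result registered from a looped
+    # slurp/fetch task where each loop item carries a "name" key:
+    #   {"results": [{"item": {"name": "ca_file"}, "content": "PEM data..."}]}
+    # entry_from_named_pair(that_dict, "ca_file") would then return "PEM data...".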
+ results = register_pairs.get("results")
+ if results is None:
+ raise RuntimeError("The dict argument does not have a 'results' entry. "
+ "Must not have been created using 'register' in a loop")
+ for result in results:
+ item = result.get("item")
+ if item is not None:
+ name = item.get("name")
+ if name == key:
+ return result["content"]
+ raise RuntimeError("There was no entry found in the dict that had an item with a name that matched {}".format(key))
+
+
+# pylint: disable=too-few-public-methods
+class FilterModule(object):
+ ''' OpenShift Logging Filters '''
+
+ # pylint: disable=no-self-use, too-few-public-methods
+ def filters(self):
+ ''' Returns the names of the filters provided by this class '''
+ return {
+ 'random_word': random_word,
+ 'entry_from_named_pair': entry_from_named_pair,
+ }
diff --git a/roles/openshift_logging/library/openshift_logging_facts.py b/roles/openshift_logging/library/openshift_logging_facts.py
new file mode 100644
index 000000000..64bc33435
--- /dev/null
+++ b/roles/openshift_logging/library/openshift_logging_facts.py
@@ -0,0 +1,340 @@
+'''
+---
+module: openshift_logging_facts
+version_added: ""
+short_description: Gather facts about the OpenShift logging stack
+description:
+ - Determine the current facts about the OpenShift logging stack (e.g. cluster size)
+options:
+author: Red Hat, Inc
+'''
+
+import copy
+import json
+
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
+from subprocess import * # noqa: F402,F403
+
+# ignore pylint errors related to the module_utils import
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
+from ansible.module_utils.basic import * # noqa: F402,F403
+
+import yaml
+
+EXAMPLES = """
+- action: openshift_logging_facts
+"""
+
+RETURN = """
+"""
+
+DEFAULT_OC_OPTIONS = ["-o", "json"]
+
+# constants used for various labels and selectors
+COMPONENT_KEY = "component"
+LOGGING_INFRA_KEY = "logging-infra"
+
+# selectors for filtering resources
+DS_FLUENTD_SELECTOR = LOGGING_INFRA_KEY + "=" + "fluentd"
+LOGGING_SELECTOR = LOGGING_INFRA_KEY + "=" + "support"
+ROUTE_SELECTOR = "component=support, logging-infra=support, provider=openshift"
+COMPONENTS = ["kibana", "curator", "elasticsearch", "fluentd", "kibana_ops", "curator_ops", "elasticsearch_ops"]
+
+
+class OCBaseCommand(object):
+ ''' The base class used to query openshift '''
+
+ def __init__(self, binary, kubeconfig, namespace):
+ ''' the init method of OCBaseCommand class '''
+ self.binary = binary
+ self.kubeconfig = kubeconfig
+ self.user = self.get_system_admin(self.kubeconfig)
+ self.namespace = namespace
+
+ # pylint: disable=no-self-use
+ def get_system_admin(self, kubeconfig):
+ ''' Retrieves the system admin '''
+ with open(kubeconfig, 'r') as kubeconfig_file:
+ config = yaml.load(kubeconfig_file)
+ for user in config["users"]:
+ if user["name"].startswith("system:admin"):
+ return user["name"]
+ raise Exception("Unable to find system:admin in: " + kubeconfig)
+
+ # pylint: disable=too-many-arguments, dangerous-default-value
+ def oc_command(self, sub, kind, namespace=None, name=None, add_options=None):
+ ''' Wrapper method for the "oc" command '''
+ cmd = [self.binary, sub, kind]
+ if name is not None:
+ cmd = cmd + [name]
+ if namespace is not None:
+ cmd = cmd + ["-n", namespace]
+ if add_options is None:
+ add_options = []
+ cmd = cmd + ["--user=" + self.user, "--config=" + self.kubeconfig] + DEFAULT_OC_OPTIONS + add_options
+ try:
+ process = Popen(cmd, stdout=PIPE, stderr=PIPE) # noqa: F405
+            out, err = process.communicate()
+ if len(err) > 0:
+ if 'not found' in err:
+ return {'items': []}
+ if 'No resources found' in err:
+ return {'items': []}
+ raise Exception(err)
+ except Exception as excp:
+ err = "There was an exception trying to run the command '" + " ".join(cmd) + "' " + str(excp)
+ raise Exception(err)
+
+ return json.loads(out)
+
+
+class OpenshiftLoggingFacts(OCBaseCommand):
+ ''' The class structure for holding the OpenshiftLogging Facts'''
+ name = "facts"
+
+ def __init__(self, logger, binary, kubeconfig, namespace):
+ ''' The init method for OpenshiftLoggingFacts '''
+ super(OpenshiftLoggingFacts, self).__init__(binary, kubeconfig, namespace)
+ self.logger = logger
+ self.facts = dict()
+
+ def default_keys_for(self, kind):
+ ''' Sets the default key values for kind '''
+ for comp in COMPONENTS:
+ self.add_facts_for(comp, kind)
+
+ def add_facts_for(self, comp, kind, name=None, facts=None):
+ ''' Add facts for the provided kind '''
+ if comp not in self.facts:
+ self.facts[comp] = dict()
+ if kind not in self.facts[comp]:
+ self.facts[comp][kind] = dict()
+ if name:
+ self.facts[comp][kind][name] = facts
+
+ def facts_for_routes(self, namespace):
+ ''' Gathers facts for Routes in logging namespace '''
+ self.default_keys_for("routes")
+ route_list = self.oc_command("get", "routes", namespace=namespace, add_options=["-l", ROUTE_SELECTOR])
+ if len(route_list["items"]) == 0:
+ return None
+ for route in route_list["items"]:
+ name = route["metadata"]["name"]
+ comp = self.comp(name)
+ if comp is not None:
+ self.add_facts_for(comp, "routes", name, dict(host=route["spec"]["host"]))
+ self.facts["agl_namespace"] = namespace
+
+ def facts_for_daemonsets(self, namespace):
+ ''' Gathers facts for Daemonsets in logging namespace '''
+ self.default_keys_for("daemonsets")
+ ds_list = self.oc_command("get", "daemonsets", namespace=namespace,
+ add_options=["-l", LOGGING_INFRA_KEY + "=fluentd"])
+ if len(ds_list["items"]) == 0:
+ return
+ for ds_item in ds_list["items"]:
+ name = ds_item["metadata"]["name"]
+ comp = self.comp(name)
+ spec = ds_item["spec"]["template"]["spec"]
+ container = spec["containers"][0]
+ result = dict(
+ selector=ds_item["spec"]["selector"],
+ image=container["image"],
+ resources=container["resources"],
+ nodeSelector=spec["nodeSelector"],
+ serviceAccount=spec["serviceAccount"],
+ terminationGracePeriodSeconds=spec["terminationGracePeriodSeconds"]
+ )
+ self.add_facts_for(comp, "daemonsets", name, result)
+
+ def facts_for_pvcs(self, namespace):
+ ''' Gathers facts for PVCS in logging namespace'''
+ self.default_keys_for("pvcs")
+ pvclist = self.oc_command("get", "pvc", namespace=namespace, add_options=["-l", LOGGING_INFRA_KEY])
+ if len(pvclist["items"]) == 0:
+ return
+ for pvc in pvclist["items"]:
+ name = pvc["metadata"]["name"]
+ comp = self.comp(name)
+ self.add_facts_for(comp, "pvcs", name, dict())
+
+ def facts_for_deploymentconfigs(self, namespace):
+ ''' Gathers facts for DeploymentConfigs in logging namespace '''
+ self.default_keys_for("deploymentconfigs")
+ dclist = self.oc_command("get", "deploymentconfigs", namespace=namespace, add_options=["-l", LOGGING_INFRA_KEY])
+ if len(dclist["items"]) == 0:
+ return
+ dcs = dclist["items"]
+ for dc_item in dcs:
+ name = dc_item["metadata"]["name"]
+ comp = self.comp(name)
+ if comp is not None:
+ spec = dc_item["spec"]["template"]["spec"]
+ facts = dict(
+ selector=dc_item["spec"]["selector"],
+ replicas=dc_item["spec"]["replicas"],
+ serviceAccount=spec["serviceAccount"],
+ containers=dict(),
+ volumes=dict()
+ )
+ if "volumes" in spec:
+ for vol in spec["volumes"]:
+ clone = copy.deepcopy(vol)
+ clone.pop("name", None)
+ facts["volumes"][vol["name"]] = clone
+ for container in spec["containers"]:
+ facts["containers"][container["name"]] = dict(
+ image=container["image"],
+ resources=container["resources"],
+ )
+ self.add_facts_for(comp, "deploymentconfigs", name, facts)
+
+ def facts_for_services(self, namespace):
+ ''' Gathers facts for services in logging namespace '''
+ self.default_keys_for("services")
+ servicelist = self.oc_command("get", "services", namespace=namespace, add_options=["-l", LOGGING_SELECTOR])
+ if len(servicelist["items"]) == 0:
+ return
+ for service in servicelist["items"]:
+ name = service["metadata"]["name"]
+ comp = self.comp(name)
+ if comp is not None:
+ self.add_facts_for(comp, "services", name, dict())
+
+ def facts_for_configmaps(self, namespace):
+ ''' Gathers facts for configmaps in logging namespace '''
+ self.default_keys_for("configmaps")
+ a_list = self.oc_command("get", "configmaps", namespace=namespace, add_options=["-l", LOGGING_SELECTOR])
+ if len(a_list["items"]) == 0:
+ return
+ for item in a_list["items"]:
+ name = item["metadata"]["name"]
+ comp = self.comp(name)
+ if comp is not None:
+ self.add_facts_for(comp, "configmaps", name, item["data"])
+
+ def facts_for_oauthclients(self, namespace):
+ ''' Gathers facts for oauthclients used with logging '''
+ self.default_keys_for("oauthclients")
+ a_list = self.oc_command("get", "oauthclients", namespace=namespace, add_options=["-l", LOGGING_SELECTOR])
+ if len(a_list["items"]) == 0:
+ return
+ for item in a_list["items"]:
+ name = item["metadata"]["name"]
+ comp = self.comp(name)
+ if comp is not None:
+ result = dict(
+ redirectURIs=item["redirectURIs"]
+ )
+ self.add_facts_for(comp, "oauthclients", name, result)
+
+ def facts_for_secrets(self, namespace):
+ ''' Gathers facts for secrets in the logging namespace '''
+ self.default_keys_for("secrets")
+ a_list = self.oc_command("get", "secrets", namespace=namespace)
+ if len(a_list["items"]) == 0:
+ return
+ for item in a_list["items"]:
+ name = item["metadata"]["name"]
+ comp = self.comp(name)
+ if comp is not None and item["type"] == "Opaque":
+ result = dict(
+ keys=item["data"].keys()
+ )
+ self.add_facts_for(comp, "secrets", name, result)
+
+ def facts_for_sccs(self):
+ ''' Gathers facts for SCCs used with logging '''
+ self.default_keys_for("sccs")
+ scc = self.oc_command("get", "scc", name="privileged")
+ if len(scc["users"]) == 0:
+ return
+ for item in scc["users"]:
+ comp = self.comp(item)
+ if comp is not None:
+ self.add_facts_for(comp, "sccs", "privileged", dict())
+
+ def facts_for_clusterrolebindings(self, namespace):
+ ''' Gathers ClusterRoleBindings used with logging '''
+ self.default_keys_for("clusterrolebindings")
+ role = self.oc_command("get", "clusterrolebindings", name="cluster-readers")
+ if "subjects" not in role or len(role["subjects"]) == 0:
+ return
+ for item in role["subjects"]:
+ comp = self.comp(item["name"])
+ if comp is not None and namespace == item["namespace"]:
+ self.add_facts_for(comp, "clusterrolebindings", "cluster-readers", dict())
+
+# this needs to end up nested under the service account...
+ def facts_for_rolebindings(self, namespace):
+ ''' Gathers facts for RoleBindings used with logging '''
+ self.default_keys_for("rolebindings")
+ role = self.oc_command("get", "rolebindings", namespace=namespace, name="logging-elasticsearch-view-role")
+ if "subjects" not in role or len(role["subjects"]) == 0:
+ return
+ for item in role["subjects"]:
+ comp = self.comp(item["name"])
+ if comp is not None and namespace == item["namespace"]:
+ self.add_facts_for(comp, "rolebindings", "logging-elasticsearch-view-role", dict())
+
+ # pylint: disable=no-self-use, too-many-return-statements
+ def comp(self, name):
+ ''' Does a comparison to evaluate the logging component '''
+ if name.startswith("logging-curator-ops"):
+ return "curator_ops"
+ elif name.startswith("logging-kibana-ops") or name.startswith("kibana-ops"):
+ return "kibana_ops"
+ elif name.startswith("logging-es-ops") or name.startswith("logging-elasticsearch-ops"):
+ return "elasticsearch_ops"
+ elif name.startswith("logging-curator"):
+ return "curator"
+ elif name.startswith("logging-kibana") or name.startswith("kibana"):
+ return "kibana"
+ elif name.startswith("logging-es") or name.startswith("logging-elasticsearch"):
+ return "elasticsearch"
+ elif name.startswith("logging-fluentd") or name.endswith("aggregated-logging-fluentd"):
+ return "fluentd"
+ else:
+ return None
+
+ def build_facts(self):
+ ''' Builds the logging facts and returns them '''
+ self.facts_for_routes(self.namespace)
+ self.facts_for_daemonsets(self.namespace)
+ self.facts_for_deploymentconfigs(self.namespace)
+ self.facts_for_services(self.namespace)
+ self.facts_for_configmaps(self.namespace)
+ self.facts_for_sccs()
+ self.facts_for_oauthclients(self.namespace)
+ self.facts_for_clusterrolebindings(self.namespace)
+ self.facts_for_rolebindings(self.namespace)
+ self.facts_for_secrets(self.namespace)
+ self.facts_for_pvcs(self.namespace)
+
+ return self.facts
+
+
+def main():
+ ''' The main method '''
+ module = AnsibleModule( # noqa: F405
+ argument_spec=dict(
+ admin_kubeconfig={"required": True, "type": "str"},
+ oc_bin={"required": True, "type": "str"},
+ openshift_logging_namespace={"required": True, "type": "str"}
+ ),
+ supports_check_mode=False
+ )
+ try:
+ cmd = OpenshiftLoggingFacts(module, module.params['oc_bin'], module.params['admin_kubeconfig'],
+ module.params['openshift_logging_namespace'])
+ module.exit_json(
+ ansible_facts={"openshift_logging_facts": cmd.build_facts()}
+ )
+ # ignore broad-except error to avoid stack trace to ansible user
+ # pylint: disable=broad-except
+ except Exception as error:
+ module.fail_json(msg=str(error))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/openshift_logging/meta/main.yaml b/roles/openshift_logging/meta/main.yaml
new file mode 100644
index 000000000..7050e51db
--- /dev/null
+++ b/roles/openshift_logging/meta/main.yaml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+ author: OpenShift Red Hat
+  description: OpenShift Aggregated Logging
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- role: openshift_facts
diff --git a/roles/openshift_logging/tasks/delete_logging.yaml b/roles/openshift_logging/tasks/delete_logging.yaml
new file mode 100644
index 000000000..908f3ee88
--- /dev/null
+++ b/roles/openshift_logging/tasks/delete_logging.yaml
@@ -0,0 +1,114 @@
+---
+- name: stop logging
+ include: stop_cluster.yaml
+
+# delete the deployment objects that we had created
+- name: delete logging api objects
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ delete {{ item }} --selector logging-infra -n {{ openshift_logging_namespace }} --ignore-not-found=true
+ with_items:
+ - dc
+ - rc
+ - svc
+ - routes
+ - templates
+ - daemonset
+ register: delete_result
+ changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
+
+
+# delete the oauthclient
+- name: delete oauthclient kibana-proxy
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete oauthclient kibana-proxy --ignore-not-found=true
+ register: delete_result
+ changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
+
+# delete any image streams that we may have created
+- name: delete logging is
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ delete is -l logging-infra=support -n {{ openshift_logging_namespace }} --ignore-not-found=true
+ register: delete_result
+ changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
+
+# delete our old secrets
+- name: delete logging secrets
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ delete secret {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true
+ with_items:
+ - logging-fluentd
+ - logging-elasticsearch
+ - logging-kibana
+ - logging-kibana-proxy
+ - logging-curator
+ ignore_errors: yes
+ register: delete_result
+ changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
+
+# delete role bindings
+- name: delete rolebindings
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ delete rolebinding {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true
+ with_items:
+ - logging-elasticsearch-view-role
+ register: delete_result
+ changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
+
+# delete cluster role bindings
+- name: delete cluster role bindings
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ delete clusterrolebindings {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true
+ with_items:
+ - rolebinding-reader
+ register: delete_result
+ changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
+
+# delete cluster roles
+- name: delete cluster roles
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ delete clusterroles {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true
+ with_items:
+ - rolebinding-reader
+ register: delete_result
+ changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
+
+# delete our service accounts
+- name: delete service accounts
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ delete serviceaccount {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true
+ with_items:
+ - aggregated-logging-elasticsearch
+ - aggregated-logging-kibana
+ - aggregated-logging-curator
+ - aggregated-logging-fluentd
+ register: delete_result
+ changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
+
+# delete our roles
+- name: delete roles
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ delete clusterrole {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true
+ with_items:
+ - daemonset-admin
+ register: delete_result
+ changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
+
+# delete our configmaps
+- name: delete configmaps
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ delete configmap {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true
+ with_items:
+ - logging-curator
+ - logging-elasticsearch
+ - logging-fluentd
+ register: delete_result
+ changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
diff --git a/roles/openshift_logging/tasks/generate_certs.yaml b/roles/openshift_logging/tasks/generate_certs.yaml
new file mode 100644
index 000000000..20e50482e
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_certs.yaml
@@ -0,0 +1,143 @@
+---
+# we will ensure our secrets and configmaps are set up here first
+- name: Checking for ca.key
+ stat: path="{{generated_certs_dir}}/ca.key"
+ register: ca_key_file
+ check_mode: no
+
+- name: Checking for ca.crt
+ stat: path="{{generated_certs_dir}}/ca.crt"
+ register: ca_cert_file
+ check_mode: no
+
+- name: Checking for ca.serial.txt
+ stat: path="{{generated_certs_dir}}/ca.serial.txt"
+ register: ca_serial_file
+ check_mode: no
+
+- name: Generate certificates
+ command: >
+ {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig ca create-signer-cert
+ --key={{generated_certs_dir}}/ca.key --cert={{generated_certs_dir}}/ca.crt
+ --serial={{generated_certs_dir}}/ca.serial.txt --name=logging-signer-test
+ check_mode: no
+ when:
+ - not ca_key_file.stat.exists
+ - not ca_cert_file.stat.exists
+ - not ca_serial_file.stat.exists
+
+- name: Checking for signing.conf
+ stat: path="{{generated_certs_dir}}/signing.conf"
+ register: signing_conf_file
+ check_mode: no
+
+- template: src=signing.conf.j2 dest={{generated_certs_dir}}/signing.conf
+ vars:
+ - top_dir: '{{generated_certs_dir}}'
+ when: not signing_conf_file.stat.exists
+
+- include: procure_server_certs.yaml
+ loop_control:
+ loop_var: cert_info
+ with_items:
+ - procure_component: kibana
+ - procure_component: kibana-ops
+ - procure_component: kibana-internal
+ hostnames: "kibana, kibana-ops, {{openshift_logging_kibana_hostname}}, {{openshift_logging_kibana_ops_hostname}}"
+
+- name: Copy proxy TLS configuration file
+ copy: src=server-tls.json dest={{generated_certs_dir}}/server-tls.json
+ when: server_tls_json is undefined
+ check_mode: no
+
+- name: Copy proxy TLS configuration file
+ copy: content="{{server_tls_json}}" dest={{generated_certs_dir}}/server-tls.json
+ when: server_tls_json is defined
+ check_mode: no
+
+- name: Checking for ca.db
+ stat: path="{{generated_certs_dir}}/ca.db"
+ register: ca_db_file
+ check_mode: no
+
+- copy: content="" dest={{generated_certs_dir}}/ca.db
+ check_mode: no
+ when:
+ - not ca_db_file.stat.exists
+
+- name: Checking for ca.crt.srl
+ stat: path="{{generated_certs_dir}}/ca.crt.srl"
+ register: ca_cert_srl_file
+ check_mode: no
+
+- copy: content="" dest={{generated_certs_dir}}/ca.crt.srl
+ check_mode: no
+ when:
+ - not ca_cert_srl_file.stat.exists
+
+- name: Generate PEM certs
+ include: generate_pems.yaml component={{node_name}}
+ with_items:
+ - system.logging.fluentd
+ - system.logging.kibana
+ - system.logging.curator
+ - system.admin
+ loop_control:
+ loop_var: node_name
+
+- name: Creating necessary JKS certs
+ include: generate_jks.yaml
+
+# check for secret/logging-kibana-proxy
+- command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get secret/logging-kibana-proxy -n {{openshift_logging_namespace}} -o jsonpath='{.data.oauth-secret}'
+ register: kibana_secret_oauth_check
+ ignore_errors: yes
+ changed_when: no
+ check_mode: no
+
+- command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get secret/logging-kibana-proxy -n {{openshift_logging_namespace}} -o jsonpath='{.data.session-secret}'
+ register: kibana_secret_session_check
+ ignore_errors: yes
+ changed_when: no
+ check_mode: no
+
+# check for oauthclient secret
+- command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get oauthclient/kibana-proxy -n {{openshift_logging_namespace}} -o jsonpath='{.secret}'
+ register: oauth_secret_check
+ ignore_errors: yes
+ changed_when: no
+ check_mode: no
+
+# set or generate as needed
+- name: Generate proxy session
+ set_fact: session_secret={{'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'|random_word(200)}}
+ check_mode: no
+ when:
+ - kibana_secret_session_check.stdout is not defined or kibana_secret_session_check.stdout == ''
+
+- name: Generate proxy session
+ set_fact: session_secret={{kibana_secret_session_check.stdout | b64decode }}
+ check_mode: no
+ when:
+ - kibana_secret_session_check.stdout is defined
+ - kibana_secret_session_check.stdout != ''
+
+- name: Generate oauth client secret
+ set_fact: oauth_secret={{'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'|random_word(64)}}
+ check_mode: no
+ when: kibana_secret_oauth_check.stdout is not defined or kibana_secret_oauth_check.stdout == ''
+ or oauth_secret_check.stdout is not defined or oauth_secret_check.stdout == ''
+ or kibana_secret_oauth_check.stdout | b64decode != oauth_secret_check.stdout
+
+- name: Generate oauth client secret
+ set_fact: oauth_secret={{kibana_secret_oauth_check.stdout | b64decode}}
+ check_mode: no
+ when:
+ - kibana_secret_oauth_check is defined
+ - kibana_secret_oauth_check.stdout != ''
+ - oauth_secret_check.stdout is defined
+ - oauth_secret_check.stdout != ''
+ - kibana_secret_oauth_check.stdout | b64decode == oauth_secret_check.stdout
diff --git a/roles/openshift_logging/tasks/generate_clusterrolebindings.yaml b/roles/openshift_logging/tasks/generate_clusterrolebindings.yaml
new file mode 100644
index 000000000..56f590717
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_clusterrolebindings.yaml
@@ -0,0 +1,13 @@
+---
+- name: Generate ClusterRoleBindings
+ template: src=clusterrolebinding.j2 dest={{mktemp.stdout}}/templates/logging-15-{{obj_name}}-clusterrolebinding.yaml
+ vars:
+ acct_name: aggregated-logging-elasticsearch
+ obj_name: rolebinding-reader
+ crb_usernames: ["system:serviceaccount:{{openshift_logging_namespace}}:{{acct_name}}"]
+ subjects:
+ - kind: ServiceAccount
+ name: "{{acct_name}}"
+ namespace: "{{openshift_logging_namespace}}"
+ check_mode: no
+ changed_when: no
diff --git a/roles/openshift_logging/tasks/generate_clusterroles.yaml b/roles/openshift_logging/tasks/generate_clusterroles.yaml
new file mode 100644
index 000000000..0b8b1014c
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_clusterroles.yaml
@@ -0,0 +1,11 @@
+---
+- name: Generate ClusterRole for rolebinding-reader
+ template: src=clusterrole.j2 dest={{mktemp.stdout}}/templates/logging-10-{{obj_name}}-clusterrole.yaml
+ vars:
+ obj_name: rolebinding-reader
+ rules:
+ - resources: [clusterrolebindings]
+ verbs:
+ - get
+ check_mode: no
+ changed_when: no
diff --git a/roles/openshift_logging/tasks/generate_configmaps.yaml b/roles/openshift_logging/tasks/generate_configmaps.yaml
new file mode 100644
index 000000000..8fcf517ad
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_configmaps.yaml
@@ -0,0 +1,117 @@
+---
+- block:
+ - copy:
+ src: elasticsearch-logging.yml
+ dest: "{{mktemp.stdout}}/elasticsearch-logging.yml"
+ when: es_logging_contents is undefined
+ changed_when: no
+
+ - template:
+ src: elasticsearch.yml.j2
+ dest: "{{mktemp.stdout}}/elasticsearch.yml"
+ vars:
+ - allow_cluster_reader: "{{openshift_logging_es_ops_allow_cluster_reader | lower | default('false')}}"
+ when: es_config_contents is undefined
+ changed_when: no
+
+ - copy:
+ content: "{{es_logging_contents}}"
+ dest: "{{mktemp.stdout}}/elasticsearch-logging.yml"
+ when: es_logging_contents is defined
+ changed_when: no
+
+ - copy:
+ content: "{{es_config_contents}}"
+ dest: "{{mktemp.stdout}}/elasticsearch.yml"
+ when: es_config_contents is defined
+ changed_when: no
+
+ - command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-elasticsearch
+ --from-file=logging.yml={{mktemp.stdout}}/elasticsearch-logging.yml --from-file=elasticsearch.yml={{mktemp.stdout}}/elasticsearch.yml -o yaml --dry-run
+ register: es_configmap
+ changed_when: no
+
+ - copy:
+ content: "{{es_configmap.stdout}}"
+ dest: "{{mktemp.stdout}}/templates/logging-elasticsearch-configmap.yaml"
+ when: es_configmap.stdout is defined
+ changed_when: no
+ check_mode: no
+
+- block:
+ - copy:
+ src: curator.yml
+ dest: "{{mktemp.stdout}}/curator.yml"
+ when: curator_config_contents is undefined
+ changed_when: no
+
+ - copy:
+ content: "{{curator_config_contents}}"
+ dest: "{{mktemp.stdout}}/curator.yml"
+ when: curator_config_contents is defined
+ changed_when: no
+
+ - command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-curator
+ --from-file=config.yaml={{mktemp.stdout}}/curator.yml -o yaml --dry-run
+ register: curator_configmap
+ changed_when: no
+
+ - copy:
+ content: "{{curator_configmap.stdout}}"
+ dest: "{{mktemp.stdout}}/templates/logging-curator-configmap.yaml"
+ when: curator_configmap.stdout is defined
+ changed_when: no
+ check_mode: no
+
+- block:
+ - copy:
+ src: fluent.conf
+ dest: "{{mktemp.stdout}}/fluent.conf"
+ when: fluentd_config_contents is undefined
+ changed_when: no
+
+ - copy:
+ src: fluentd-throttle-config.yaml
+ dest: "{{mktemp.stdout}}/fluentd-throttle-config.yaml"
+ when: fluentd_throttle_contents is undefined
+ changed_when: no
+
+ - copy:
+ src: secure-forward.conf
+ dest: "{{mktemp.stdout}}/secure-forward.conf"
+      when: fluentd_secureforward_contents is undefined
+ changed_when: no
+
+ - copy:
+ content: "{{fluentd_config_contents}}"
+ dest: "{{mktemp.stdout}}/fluent.conf"
+ when: fluentd_config_contents is defined
+ changed_when: no
+
+ - copy:
+ content: "{{fluentd_throttle_contents}}"
+ dest: "{{mktemp.stdout}}/fluentd-throttle-config.yaml"
+ when: fluentd_throttle_contents is defined
+ changed_when: no
+
+ - copy:
+ content: "{{fluentd_secureforward_contents}}"
+ dest: "{{mktemp.stdout}}/secure-forward.conf"
+ when: fluentd_secureforward_contents is defined
+ changed_when: no
+
+ - command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-fluentd
+ --from-file=fluent.conf={{mktemp.stdout}}/fluent.conf --from-file=throttle-config.yaml={{mktemp.stdout}}/fluentd-throttle-config.yaml
+ --from-file=secure-forward.conf={{mktemp.stdout}}/secure-forward.conf -o yaml --dry-run
+ register: fluentd_configmap
+ changed_when: no
+
+ - copy:
+ content: "{{fluentd_configmap.stdout}}"
+ dest: "{{mktemp.stdout}}/templates/logging-fluentd-configmap.yaml"
+ when: fluentd_configmap.stdout is defined
+ changed_when: no
+ check_mode: no
diff --git a/roles/openshift_logging/tasks/generate_deploymentconfigs.yaml b/roles/openshift_logging/tasks/generate_deploymentconfigs.yaml
new file mode 100644
index 000000000..8aea4e81f
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_deploymentconfigs.yaml
@@ -0,0 +1,65 @@
+---
+- name: Generate kibana deploymentconfig
+ template: src=kibana.j2 dest={{mktemp.stdout}}/logging-kibana-dc.yaml
+ vars:
+ component: kibana
+ deploy_name: "logging-{{component}}"
+ image: "{{openshift_logging_image_prefix}}logging-kibana:{{openshift_logging_image_version}}"
+ proxy_image: "{{openshift_logging_image_prefix}}logging-auth-proxy:{{openshift_logging_image_version}}"
+ es_host: logging-es
+ es_port: "{{openshift_logging_es_port}}"
+ check_mode: no
+ changed_when: no
+
+- name: Generate OPS kibana deploymentconfig
+ template: src=kibana.j2 dest={{mktemp.stdout}}/logging-kibana-ops-dc.yaml
+ vars:
+ component: kibana-ops
+ deploy_name: "logging-{{component}}"
+ image: "{{openshift_logging_image_prefix}}logging-kibana:{{openshift_logging_image_version}}"
+ proxy_image: "{{openshift_logging_image_prefix}}logging-auth-proxy:{{openshift_logging_image_version}}"
+ es_host: logging-es-ops
+ es_port: "{{openshift_logging_es_ops_port}}"
+ check_mode: no
+ changed_when: no
+
+- name: Generate elasticsearch deploymentconfig
+ template: src=es.j2 dest={{mktemp.stdout}}/logging-es-dc.yaml
+ vars:
+ component: es
+ deploy_name_prefix: "logging-{{component}}"
+ deploy_name: "{{deploy_name_prefix}}-abc123"
+ image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}"
+ es_cluster_name: "{{component}}"
+ check_mode: no
+ changed_when: no
+
+- name: Generate OPS elasticsearch deploymentconfig
+ template: src=es.j2 dest={{mktemp.stdout}}/logging-es-ops-dc.yaml
+ vars:
+ component: es-ops
+ deploy_name_prefix: "logging-{{component}}"
+ deploy_name: "{{deploy_name_prefix}}-abc123"
+ image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}"
+ es_cluster_name: "{{component}}"
+ check_mode: no
+ changed_when: no
+
+- name: Generate curator deploymentconfig
+ template: src=curator.j2 dest={{mktemp.stdout}}/logging-curator-dc.yaml
+ vars:
+ component: curator
+ deploy_name: "logging-{{component}}"
+ image: "{{openshift_logging_image_prefix}}logging-curator:{{openshift_logging_image_version}}"
+ check_mode: no
+ changed_when: no
+
+- name: Generate OPS curator deploymentconfig
+ template: src=curator.j2 dest={{mktemp.stdout}}/logging-curator-ops-dc.yaml
+ vars:
+ component: curator-ops
+ deploy_name: "logging-{{component}}"
+ image: "{{openshift_logging_image_prefix}}logging-curator:{{openshift_logging_image_version}}"
+ openshift_logging_es_host: logging-es-ops
+ check_mode: no
+ changed_when: no
diff --git a/roles/openshift_logging/tasks/generate_jks.yaml b/roles/openshift_logging/tasks/generate_jks.yaml
new file mode 100644
index 000000000..adb6c2b2d
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_jks.yaml
@@ -0,0 +1,111 @@
+---
+# check if pod generated files exist -- if they all do don't run the pod
+- name: Checking for elasticsearch.jks
+ stat: path="{{generated_certs_dir}}/elasticsearch.jks"
+ register: elasticsearch_jks
+ check_mode: no
+
+- name: Checking for logging-es.jks
+ stat: path="{{generated_certs_dir}}/logging-es.jks"
+ register: logging_es_jks
+ check_mode: no
+
+- name: Checking for system.admin.jks
+ stat: path="{{generated_certs_dir}}/system.admin.jks"
+ register: system_admin_jks
+ check_mode: no
+
+- name: Checking for truststore.jks
+ stat: path="{{generated_certs_dir}}/truststore.jks"
+ register: truststore_jks
+ check_mode: no
+
+- name: Create temp directory for doing work in
+ local_action: command mktemp -d /tmp/openshift-logging-ansible-XXXXXX
+ register: local_tmp
+ changed_when: False
+ check_mode: no
+
+- name: Create placeholder for previously created JKS certs to prevent recreating...
+ file:
+ path: "{{local_tmp.stdout}}/elasticsearch.jks"
+ state: touch
+ mode: "u=rw,g=r,o=r"
+ when: elasticsearch_jks.stat.exists
+ changed_when: False
+
+- name: Create placeholder for previously created JKS certs to prevent recreating...
+ file:
+ path: "{{local_tmp.stdout}}/logging-es.jks"
+ state: touch
+ mode: "u=rw,g=r,o=r"
+ when: logging_es_jks.stat.exists
+ changed_when: False
+
+- name: Create placeholder for previously created JKS certs to prevent recreating...
+ file:
+ path: "{{local_tmp.stdout}}/system.admin.jks"
+ state: touch
+ mode: "u=rw,g=r,o=r"
+ when: system_admin_jks.stat.exists
+ changed_when: False
+
+- name: Create placeholder for previously created JKS certs to prevent recreating...
+ file:
+ path: "{{local_tmp.stdout}}/truststore.jks"
+ state: touch
+ mode: "u=rw,g=r,o=r"
+ when: truststore_jks.stat.exists
+ changed_when: False
+
+- name: pulling down signing items from host
+ fetch:
+ src: "{{generated_certs_dir}}/{{item}}"
+ dest: "{{local_tmp.stdout}}/{{item}}"
+ flat: yes
+ with_items:
+ - ca.crt
+ - ca.key
+ - ca.serial.txt
+ - ca.crl.srl
+ - ca.db
+
+- local_action: template src=signing.conf.j2 dest={{local_tmp.stdout}}/signing.conf
+ vars:
+ - top_dir: "{{local_tmp.stdout}}"
+
+- name: Run JKS generation script
+ local_action: script generate-jks.sh {{local_tmp.stdout}} {{openshift_logging_namespace}}
+ check_mode: no
+ become: yes
+ when: not elasticsearch_jks.stat.exists or not logging_es_jks.stat.exists or not system_admin_jks.stat.exists or not truststore_jks.stat.exists
+
+- name: Pushing locally generated JKS certs to remote host...
+ copy:
+ src: "{{local_tmp.stdout}}/elasticsearch.jks"
+ dest: "{{generated_certs_dir}}/elasticsearch.jks"
+ when: not elasticsearch_jks.stat.exists
+
+- name: Pushing locally generated JKS certs to remote host...
+ copy:
+ src: "{{local_tmp.stdout}}/logging-es.jks"
+ dest: "{{generated_certs_dir}}/logging-es.jks"
+ when: not logging_es_jks.stat.exists
+
+- name: Pushing locally generated JKS certs to remote host...
+ copy:
+ src: "{{local_tmp.stdout}}/system.admin.jks"
+ dest: "{{generated_certs_dir}}/system.admin.jks"
+ when: not system_admin_jks.stat.exists
+
+- name: Pushing locally generated JKS certs to remote host...
+ copy:
+ src: "{{local_tmp.stdout}}/truststore.jks"
+ dest: "{{generated_certs_dir}}/truststore.jks"
+ when: not truststore_jks.stat.exists
+
+- name: Cleaning up temp dir
+ file:
+ path: "{{local_tmp.stdout}}"
+ state: absent
+ changed_when: False
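The four stat checks and the long `when:` on the generation script above form a skip-if-present guard: the local script only runs when at least one keystore is missing, and only the missing keystores are copied back. A condensed, illustrative equivalent of that guard (it assumes the same registered stat results from the tasks above and is not part of the role):

- name: Decide whether any JKS keystore still needs to be generated (illustrative)
  set_fact:
    # True when at least one of the four stat results reports a missing file
    jks_generation_needed: "{{ [elasticsearch_jks, logging_es_jks, system_admin_jks, truststore_jks]
                               | map(attribute='stat.exists') | list | unique != [true] }}"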
diff --git a/roles/openshift_logging/tasks/generate_pems.yaml b/roles/openshift_logging/tasks/generate_pems.yaml
new file mode 100644
index 000000000..289b72ea6
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_pems.yaml
@@ -0,0 +1,36 @@
+---
+- name: Checking for {{component}}.key
+ stat: path="{{generated_certs_dir}}/{{component}}.key"
+ register: key_file
+ check_mode: no
+
+- name: Checking for {{component}}.crt
+ stat: path="{{generated_certs_dir}}/{{component}}.crt"
+ register: cert_file
+ check_mode: no
+
+- name: Creating cert req for {{component}}
+ command: >
+ openssl req -out {{generated_certs_dir}}/{{component}}.csr -new -newkey rsa:2048 -keyout {{generated_certs_dir}}/{{component}}.key
+ -subj "/CN={{component}}/OU=OpenShift/O=Logging/subjectAltName=DNS.1=localhost{{cert_ext.stdout}}" -days 712 -nodes
+ when:
+ - not key_file.stat.exists
+ - cert_ext.stdout is defined
+ check_mode: no
+
+- name: Creating cert req for {{component}}
+ command: >
+ openssl req -out {{generated_certs_dir}}/{{component}}.csr -new -newkey rsa:2048 -keyout {{generated_certs_dir}}/{{component}}.key
+ -subj "/CN={{component}}/OU=OpenShift/O=Logging" -days 712 -nodes
+ when:
+ - not key_file.stat.exists
+ - cert_ext.stdout is undefined
+ check_mode: no
+
+- name: Sign cert request with CA for {{component}}
+ command: >
+ openssl ca -in {{generated_certs_dir}}/{{component}}.csr -notext -out {{generated_certs_dir}}/{{component}}.crt
+ -config {{generated_certs_dir}}/signing.conf -extensions v3_req -batch -extensions server_ext
+ when:
+ - not cert_file.stat.exists
+ check_mode: no
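After the `openssl ca` signing step it can be useful to confirm that the new certificate actually chains to the generated CA. The role itself does not do this; a minimal optional check, assuming the same generated_certs_dir and component variables, might look like:

- name: Verify the signed certificate against the CA (optional, illustrative)
  command: >
    openssl verify -CAfile {{generated_certs_dir}}/ca.crt
    {{generated_certs_dir}}/{{component}}.crt
  changed_when: no
  check_mode: no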
diff --git a/roles/openshift_logging/tasks/generate_pvcs.yaml b/roles/openshift_logging/tasks/generate_pvcs.yaml
new file mode 100644
index 000000000..601ec9e83
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_pvcs.yaml
@@ -0,0 +1,49 @@
+---
+- name: Init pool of PersistentVolumeClaim names
+ set_fact: es_pvc_pool={{es_pvc_pool|default([]) + [pvc_name]}}
+ vars:
+ pvc_name: "{{openshift_logging_es_pvc_prefix}}-{{item| int}}"
+ start: "{{es_pvc_names | map('regex_search',openshift_logging_es_pvc_prefix+'.*')|select('string')|list|length}}"
+ with_sequence: start={{start}} end={{ (start|int > openshift_logging_es_cluster_size - 1) | ternary(start, openshift_logging_es_cluster_size - 1)}}
+ when:
+ - openshift_logging_es_pvc_size | search('^\d.*')
+ - "{{ es_dc_names|default([]) | length < openshift_logging_es_cluster_size }}"
+ check_mode: no
+
+- name: Generating PersistentVolumeClaims
+ template: src=pvc.j2 dest={{mktemp.stdout}}/templates/logging-{{obj_name}}-pvc.yaml
+ vars:
+ obj_name: "{{claim_name}}"
+ size: "{{openshift_logging_es_pvc_size}}"
+ access_modes:
+ - ReadWriteOnce
+ pv_selector: "{{openshift_logging_es_pv_selector}}"
+ with_items:
+ - "{{es_pvc_pool | default([])}}"
+ loop_control:
+ loop_var: claim_name
+ when:
+ - not openshift_logging_es_pvc_dynamic
+ - es_pvc_pool is defined
+ check_mode: no
+ changed_when: no
+
+- name: Generating PersistentVolumeClaims - Dynamic
+ template: src=pvc.j2 dest={{mktemp.stdout}}/templates/logging-{{obj_name}}-pvc.yaml
+ vars:
+ obj_name: "{{claim_name}}"
+ annotations:
+ volume.alpha.kubernetes.io/storage-class: "dynamic"
+ size: "{{openshift_logging_es_pvc_size}}"
+ access_modes:
+ - ReadWriteOnce
+ pv_selector: "{{openshift_logging_es_pv_selector}}"
+ with_items:
+ - "{{es_pvc_pool|default([])}}"
+ loop_control:
+ loop_var: claim_name
+ when:
+ - openshift_logging_es_pvc_dynamic
+ - es_pvc_pool is defined
+ check_mode: no
+ changed_when: no
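The `start`/`end` arithmetic in the first task only generates claim names that do not exist yet: `start` counts the PVCs already matching the prefix, and the ternary clamps `end` so the sequence stays empty once the cluster size is reached. A small self-contained illustration under assumed values (two existing claims, cluster size 3), not part of the role:

- hosts: localhost
  gather_facts: no
  vars:
    prefix: logging-es          # assumed PVC prefix
    existing: 2                 # assumed: logging-es-0 and logging-es-1 already exist
    cluster_size: 3             # assumed desired cluster size
  tasks:
    - name: Show the claim names the sequence would add (here only logging-es-2)
      debug:
        msg: "{{ prefix }}-{{ item }}"
      with_sequence: start={{ existing }} end={{ [existing, cluster_size - 1] | max }}
      when: existing < cluster_size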
diff --git a/roles/openshift_logging/tasks/generate_rolebindings.yaml b/roles/openshift_logging/tasks/generate_rolebindings.yaml
new file mode 100644
index 000000000..7dc9530df
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_rolebindings.yaml
@@ -0,0 +1,12 @@
+---
+- name: Generate RoleBindings
+ template: src=rolebinding.j2 dest={{mktemp.stdout}}/templates/logging-{{obj_name}}-rolebinding.yaml
+ vars:
+ obj_name: logging-elasticsearch-view-role
+ roleRef:
+ name: view
+ subjects:
+ - kind: ServiceAccount
+ name: aggregated-logging-elasticsearch
+ check_mode: no
+ changed_when: no
diff --git a/roles/openshift_logging/tasks/generate_routes.yaml b/roles/openshift_logging/tasks/generate_routes.yaml
new file mode 100644
index 000000000..25877ebff
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_routes.yaml
@@ -0,0 +1,21 @@
+---
+- name: Generating logging routes
+ template: src=route_reencrypt.j2 dest={{mktemp.stdout}}/templates/logging-{{route_info.name}}-route.yaml
+ tags: routes
+ vars:
+ obj_name: "{{route_info.name}}"
+ route_host: "{{route_info.host}}"
+ service_name: "{{route_info.name}}"
+ tls_ca_cert: "{{key_pairs | entry_from_named_pair('ca_file')| b64decode }}"
+ tls_dest_ca_cert: "{{key_pairs | entry_from_named_pair('ca_file')| b64decode }}"
+ labels:
+ component: support
+ logging-infra: support
+ provider: openshift
+ with_items:
+ - {name: logging-kibana, host: "{{openshift_logging_kibana_hostname}}"}
+ - {name: logging-kibana-ops, host: "{{openshift_logging_kibana_ops_hostname}}"}
+ loop_control:
+ loop_var: route_info
+ when: (route_info.name == 'logging-kibana-ops' and openshift_logging_use_ops) or route_info.name == 'logging-kibana'
+ changed_when: no
diff --git a/roles/openshift_logging/tasks/generate_secrets.yaml b/roles/openshift_logging/tasks/generate_secrets.yaml
new file mode 100644
index 000000000..1829acaee
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_secrets.yaml
@@ -0,0 +1,77 @@
+---
+- name: Retrieving the certs and keys to use when generating secrets for the logging components
+ slurp: src="{{generated_certs_dir}}/{{item.file}}"
+ register: key_pairs
+ with_items:
+ - { name: "ca_file", file: "ca.crt" }
+ - { name: "kibana_key", file: "system.logging.kibana.key"}
+ - { name: "kibana_cert", file: "system.logging.kibana.crt"}
+ - { name: "curator_key", file: "system.logging.curator.key"}
+ - { name: "curator_cert", file: "system.logging.curator.crt"}
+ - { name: "fluentd_key", file: "system.logging.fluentd.key"}
+ - { name: "fluentd_cert", file: "system.logging.fluentd.crt"}
+ - { name: "kibana_internal_key", file: "kibana-internal.key"}
+ - { name: "kibana_internal_cert", file: "kibana-internal.crt"}
+ - { name: "server_tls", file: "server-tls.json"}
+
+- name: Generating secrets for logging components
+ template: src=secret.j2 dest={{mktemp.stdout}}/templates/{{secret_name}}-secret.yaml
+ vars:
+ secret_name: logging-{{component}}
+ secret_key_file: "{{component}}_key"
+ secret_cert_file: "{{component}}_cert"
+ secrets:
+ - {key: ca, value: "{{key_pairs | entry_from_named_pair('ca_file')| b64decode }}"}
+ - {key: key, value: "{{key_pairs | entry_from_named_pair(secret_key_file)| b64decode }}"}
+ - {key: cert, value: "{{key_pairs | entry_from_named_pair(secret_cert_file)| b64decode }}"}
+ secret_keys: ["ca", "cert", "key"]
+ with_items:
+ - kibana
+ - curator
+ - fluentd
+ loop_control:
+ loop_var: component
+ when: secret_name not in openshift_logging_facts.{{component}}.secrets or
+ secret_keys | difference(openshift_logging_facts.{{component}}.secrets["{{secret_name}}"]["keys"]) | length != 0
+ check_mode: no
+ changed_when: no
+
+- name: Generating secrets for kibana proxy
+ template: src=secret.j2 dest={{mktemp.stdout}}/templates/{{secret_name}}-secret.yaml
+ vars:
+ secret_name: logging-kibana-proxy
+ secrets:
+ - {key: oauth-secret, value: "{{oauth_secret}}"}
+ - {key: session-secret, value: "{{session_secret}}"}
+ - {key: server-key, value: "{{kibana_key_file}}"}
+ - {key: server-cert, value: "{{kibana_cert_file}}"}
+ - {key: server-tls, value: "{{server_tls_file}}"}
+ secret_keys: ["server-tls.json", "server-key", "session-secret", "oauth-secret", "server-cert"]
+ kibana_key_file: "{{key_pairs | entry_from_named_pair('kibana_internal_key')| b64decode }}"
+ kibana_cert_file: "{{key_pairs | entry_from_named_pair('kibana_internal_cert')| b64decode }}"
+ server_tls_file: "{{key_pairs | entry_from_named_pair('server_tls')| b64decode }}"
+ when: secret_name not in openshift_logging_facts.kibana.secrets or
+ secret_keys | difference(openshift_logging_facts.kibana.secrets["{{secret_name}}"]["keys"]) | length != 0
+ check_mode: no
+ changed_when: no
+
+- name: Generating secrets for elasticsearch
+ command: >
+ {{openshift.common.client_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig secrets new {{secret_name}}
+ key={{generated_certs_dir}}/logging-es.jks truststore={{generated_certs_dir}}/truststore.jks
+ searchguard.key={{generated_certs_dir}}/elasticsearch.jks searchguard.truststore={{generated_certs_dir}}/truststore.jks
+ admin-key={{generated_certs_dir}}/system.admin.key admin-cert={{generated_certs_dir}}/system.admin.crt
+ admin-ca={{generated_certs_dir}}/ca.crt admin.jks={{generated_certs_dir}}/system.admin.jks -o yaml
+ vars:
+ secret_name: logging-elasticsearch
+ secret_keys: ["admin-cert", "searchguard.key", "admin-ca", "key", "truststore", "admin-key"]
+ register: logging_es_secret
+ when: secret_name not in openshift_logging_facts.elasticsearch.secrets or
+ secret_keys | difference(openshift_logging_facts.elasticsearch.secrets["{{secret_name}}"]["keys"]) | length != 0
+ check_mode: no
+ changed_when: no
+
+- copy: content="{{logging_es_secret.stdout}}" dest={{mktemp.stdout}}/templates/logging-elasticsearch-secret.yaml
+ when: logging_es_secret.stdout is defined
+ check_mode: no
+ changed_when: no
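`entry_from_named_pair` is assumed to be a custom filter shipped with this role's filter plugins: it looks up one slurped file by the `name` given in the `with_items` list and returns its content. A rough equivalent using only built-in filters, assuming the `key_pairs` register from the first task above, for illustration:

- name: Decode the CA certificate from the slurp results (illustrative equivalent)
  debug:
    msg: "{{ key_pairs.results
             | selectattr('item.name', 'equalto', 'ca_file')
             | map(attribute='content') | first | b64decode }}"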
diff --git a/roles/openshift_logging/tasks/generate_serviceaccounts.yaml b/roles/openshift_logging/tasks/generate_serviceaccounts.yaml
new file mode 100644
index 000000000..21bcdfecb
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_serviceaccounts.yaml
@@ -0,0 +1,14 @@
+---
+- name: Generating serviceaccounts
+ template: src=serviceaccount.j2 dest={{mktemp.stdout}}/templates/logging-{{component}}-sa.yaml
+ vars:
+ obj_name: aggregated-logging-{{component}}
+ with_items:
+ - elasticsearch
+ - kibana
+ - fluentd
+ - curator
+ loop_control:
+ loop_var: component
+ check_mode: no
+ changed_when: no
diff --git a/roles/openshift_logging/tasks/generate_services.yaml b/roles/openshift_logging/tasks/generate_services.yaml
new file mode 100644
index 000000000..8eaac76c4
--- /dev/null
+++ b/roles/openshift_logging/tasks/generate_services.yaml
@@ -0,0 +1,87 @@
+---
+- name: Generating logging-es service
+ template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-es-svc.yaml
+ vars:
+ obj_name: logging-es
+ ports:
+ - {port: 9200, targetPort: restapi}
+ labels:
+ logging-infra: support
+ selector:
+ provider: openshift
+ component: es
+ check_mode: no
+ changed_when: no
+
+- name: Generating logging-es-cluster service
+ template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-es-cluster-svc.yaml
+ vars:
+ obj_name: logging-es-cluster
+ ports:
+ - {port: 9300}
+ labels:
+ logging-infra: support
+ selector:
+ provider: openshift
+ component: es
+ check_mode: no
+ changed_when: no
+
+- name: Generating logging-kibana service
+ template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-kibana-svc.yaml
+ vars:
+ obj_name: logging-kibana
+ ports:
+ - {port: 443, targetPort: oaproxy}
+ labels:
+ logging-infra: support
+ selector:
+ provider: openshift
+ component: kibana
+ check_mode: no
+ changed_when: no
+
+- name: Generating logging-es-ops service
+ template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-es-ops-svc.yaml
+ vars:
+ obj_name: logging-es-ops
+ ports:
+ - {port: 9200, targetPort: restapi}
+ labels:
+ logging-infra: support
+ selector:
+ provider: openshift
+ component: es-ops
+ when: openshift_logging_use_ops
+ check_mode: no
+ changed_when: no
+
+- name: Generating logging-es-ops-cluster service
+ template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-es-ops-cluster-svc.yaml
+ vars:
+ obj_name: logging-es-ops-cluster
+ ports:
+ - {port: 9300}
+ labels:
+ logging-infra: support
+ selector:
+ provider: openshift
+ component: es-ops
+ when: openshift_logging_use_ops
+ check_mode: no
+ changed_when: no
+
+- name: Generating logging-kibana-ops service
+ template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-kibana-ops-svc.yaml
+ vars:
+ obj_name: logging-kibana-ops
+ ports:
+ - {port: 443, targetPort: oaproxy}
+ labels:
+ logging-infra: support
+ selector:
+ provider: openshift
+ component: kibana-ops
+ when: openshift_logging_use_ops
+ check_mode: no
+ changed_when: no
diff --git a/roles/openshift_logging/tasks/install_curator.yaml b/roles/openshift_logging/tasks/install_curator.yaml
new file mode 100644
index 000000000..8f2825552
--- /dev/null
+++ b/roles/openshift_logging/tasks/install_curator.yaml
@@ -0,0 +1,51 @@
+---
+- name: Check Curator current replica count
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc/logging-curator
+ -o jsonpath='{.spec.replicas}' -n {{openshift_logging_namespace}}
+ register: curator_replica_count
+ when: not ansible_check_mode
+ ignore_errors: yes
+ changed_when: no
+
+- name: Check Curator ops current replica count
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc/logging-curator-ops
+ -o jsonpath='{.spec.replicas}' -n {{openshift_logging_namespace}}
+ register: curator_ops_replica_count
+ when:
+ - not ansible_check_mode
+ - openshift_logging_use_ops
+ ignore_errors: yes
+ changed_when: no
+
+- name: Generate curator deploymentconfig
+ template: src=curator.j2 dest={{mktemp.stdout}}/templates/logging-curator-dc.yaml
+ vars:
+ component: curator
+ logging_component: curator
+ deploy_name: "logging-{{component}}"
+ image: "{{openshift_logging_image_prefix}}logging-curator:{{openshift_logging_image_version}}"
+ es_host: logging-es
+ es_port: "{{openshift_logging_es_port}}"
+ curator_cpu_limit: "{{openshift_logging_curator_cpu_limit }}"
+ curator_memory_limit: "{{openshift_logging_curator_memory_limit }}"
+ replicas: "{{curator_replica_count.stdout | default (0)}}"
+ check_mode: no
+ changed_when: no
+
+- name: Generate OPS curator deploymentconfig
+ template: src=curator.j2 dest={{mktemp.stdout}}/templates/logging-curator-ops-dc.yaml
+ vars:
+ component: curator-ops
+ logging_component: curator
+ deploy_name: "logging-{{component}}"
+ image: "{{openshift_logging_image_prefix}}logging-curator:{{openshift_logging_image_version}}"
+ es_host: logging-es-ops
+ es_port: "{{openshift_logging_es_ops_port}}"
+ curator_cpu_limit: "{{openshift_logging_curator_ops_cpu_limit }}"
+ curator_memory_limit: "{{openshift_logging_curator_ops_memory_limit }}"
+ replicas: "{{curator_ops_replica_count.stdout | default (0)}}"
+ when: openshift_logging_use_ops
+ check_mode: no
+ changed_when: no
diff --git a/roles/openshift_logging/tasks/install_elasticsearch.yaml b/roles/openshift_logging/tasks/install_elasticsearch.yaml
new file mode 100644
index 000000000..fbba46a35
--- /dev/null
+++ b/roles/openshift_logging/tasks/install_elasticsearch.yaml
@@ -0,0 +1,107 @@
+---
+- name: Generate PersistentVolumeClaims
+ include: "{{ role_path}}/tasks/generate_pvcs.yaml"
+ vars:
+ es_pvc_names: "{{openshift_logging_facts.elasticsearch.pvcs.keys()}}"
+ es_dc_names: "{{openshift_logging_facts.elasticsearch.deploymentconfigs.keys()}}"
+ when:
+ - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length < openshift_logging_es_cluster_size }}"
+
+- name: Init pool of DeploymentConfig names for Elasticsearch
+ set_fact: es_dc_pool={{es_dc_pool | default([]) + [deploy_name]}}
+ vars:
+ component: es
+ es_cluster_name: "{{component}}"
+ deploy_name_prefix: "logging-{{component}}"
+ deploy_name: "{{deploy_name_prefix}}-{{'abcdefghijklmnopqrstuvwxyz0123456789'|random_word(8)}}"
+ with_sequence: count={{(openshift_logging_es_cluster_size - openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length) | abs}}
+ when:
+ - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length < openshift_logging_es_cluster_size }}"
+ check_mode: no
+
+
+- name: Generate Elasticsearch DeploymentConfig
+ template: src=es.j2 dest={{mktemp.stdout}}/templates/logging-{{deploy_name}}-dc.yaml
+ vars:
+ component: es
+ logging_component: elasticsearch
+ deploy_name_prefix: "logging-{{component}}"
+ image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}"
+ es_cluster_name: "{{component}}"
+ es_cpu_limit: "{{openshift_logging_es_cpu_limit }}"
+ es_memory_limit: "{{openshift_logging_es_memory_limit}}"
+ volume_names: "{{es_pvc_pool | default([])}}"
+ pvc_claim: "{{(volume_names | length > item.0) | ternary(volume_names[item.0], None)}}"
+ deploy_name: "{{item.1}}"
+ with_indexed_items:
+ - "{{es_dc_pool | default([])}}"
+ check_mode: no
+ when:
+ - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length < openshift_logging_es_cluster_size }}"
+ changed_when: no
+
+# --------- Tasks for Operation clusters ---------
+
+- name: Validate Elasticsearch cluster size for Ops
+ fail: msg="The openshift_logging_es_ops_cluster_size may not be scaled down by more than 1 from the number of Elasticsearch nodes already deployed"
+ vars:
+ es_dcs: "{{openshift_logging_facts.elasticsearch_ops.deploymentconfigs}}"
+ cluster_size: "{{openshift_logging_es_ops_cluster_size}}"
+ when:
+ - openshift_logging_use_ops
+ - "{{es_dcs | length - openshift_logging_es_ops_cluster_size | abs > 1}}"
+ check_mode: no
+
+- name: Generate PersistentVolumeClaims for Ops
+ include: "{{ role_path}}/tasks/generate_pvcs.yaml"
+ vars:
+ es_pvc_names: "{{openshift_logging_facts.elasticsearch_ops.pvcs.keys()}}"
+ es_dc_names: "{{openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys()}}"
+ openshift_logging_es_pvc_prefix: "{{openshift_logging_es_ops_pvc_prefix}}"
+ openshift_logging_es_cluster_size: "{{openshift_logging_es_ops_cluster_size}}"
+ openshift_logging_es_pvc_size: "{{openshift_logging_es_ops_pvc_size}}"
+ openshift_logging_es_pvc_dynamic: "{{openshift_logging_es_ops_pvc_dynamic}}"
+ openshift_logging_es_pv_selector: "{{openshift_logging_es_ops_pv_selector}}"
+ when:
+ - openshift_logging_use_ops
+ - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | length < openshift_logging_es_ops_cluster_size }}"
+ check_mode: no
+
+- name: Init pool of DeploymentConfig names for Elasticsearch for Ops
+ set_fact: es_dc_pool_ops={{es_dc_pool_ops | default([]) + [deploy_name]}}
+ vars:
+ component: es-ops
+ es_cluster_name: "{{component}}"
+ deploy_name_prefix: "logging-{{component}}"
+ deploy_name: "{{deploy_name_prefix}}-{{'abcdefghijklmnopqrstuvwxyz0123456789'|random_word(8)}}"
+ cluster_size: "{{openshift_logging_es_ops_cluster_size}}"
+ with_sequence: count={{openshift_logging_es_ops_cluster_size - openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | length}}
+ when:
+ - openshift_logging_use_ops
+ - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | length < openshift_logging_es_ops_cluster_size }}"
+ check_mode: no
+
+- name: Generate Elasticsearch DeploymentConfig for Ops
+ template: src=es.j2 dest={{mktemp.stdout}}/templates/logging-{{deploy_name}}-dc.yaml
+ vars:
+ component: es-ops
+ logging_component: elasticsearch
+ deploy_name_prefix: "logging-{{component}}"
+ image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}"
+ volume_names: "{{es_pvc_pool | default([])}}"
+ pvc_claim: "{{(volume_names | length > item.0) | ternary(volume_names[item.0], None)}}"
+ deploy_name: "{{item.1}}"
+ es_cluster_name: "{{component}}"
+ es_cpu_limit: "{{openshift_logging_es_ops_cpu_limit }}"
+ es_memory_limit: "{{openshift_logging_es_ops_memory_limit}}"
+ es_node_quorum: "{{es_ops_node_quorum}}"
+ es_recover_after_nodes: "{{es_ops_recover_after_nodes}}"
+ es_recover_expected_nodes: "{{es_ops_recover_expected_nodes}}"
+ openshift_logging_es_recover_after_time: "{{openshift_logging_es_ops_recover_after_time}}"
+ with_indexed_items:
+ - "{{es_dc_pool_ops | default([])}}"
+ when:
+ - openshift_logging_use_ops
+ - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | length < openshift_logging_es_ops_cluster_size }}"
+ check_mode: no
+ changed_when: no
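`random_word` is assumed to be another filter from this role's plugins; each Elasticsearch DeploymentConfig gets a unique suffix (a name shaped like `logging-es-abc123de`), so scaling up adds new DCs rather than reusing names. A hypothetical way to produce the same shape of name with only built-in lookups, purely for illustration:

- name: Show the shape of a generated Elasticsearch DC name (illustrative)
  debug:
    msg: "logging-es-{{ lookup('password', '/dev/null length=8 chars=ascii_lowercase,digits') }}"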
diff --git a/roles/openshift_logging/tasks/install_fluentd.yaml b/roles/openshift_logging/tasks/install_fluentd.yaml
new file mode 100644
index 000000000..4c510c6e7
--- /dev/null
+++ b/roles/openshift_logging/tasks/install_fluentd.yaml
@@ -0,0 +1,54 @@
+---
+- set_fact: fluentd_ops_host={{ (openshift_logging_use_ops) | ternary(openshift_logging_es_ops_host, openshift_logging_es_host) }}
+ check_mode: no
+
+- set_fact: fluentd_ops_port={{ (openshift_logging_use_ops) | ternary(openshift_logging_es_ops_port, openshift_logging_es_port) }}
+ check_mode: no
+
+- name: Generating Fluentd daemonset
+ template: src=fluentd.j2 dest={{mktemp.stdout}}/templates/logging-fluentd.yaml
+ vars:
+ daemonset_name: logging-fluentd
+ daemonset_component: fluentd
+ daemonset_container_name: fluentd-elasticsearch
+ daemonset_serviceAccount: aggregated-logging-fluentd
+ ops_host: "{{ fluentd_ops_host }}"
+ ops_port: "{{ fluentd_ops_port }}"
+ fluentd_nodeselector_key: "{{openshift_logging_fluentd_nodeselector.keys()[0]}}"
+ fluentd_nodeselector_value: "{{openshift_logging_fluentd_nodeselector.values()[0]}}"
+ check_mode: no
+ changed_when: no
+
+- name: "Check fluentd privileged permissions"
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ get scc/privileged -o jsonpath='{.users}'
+ register: fluentd_privileged
+ check_mode: no
+ changed_when: no
+
+- name: "Set privileged permissions for fluentd"
+ command: >
+ {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy
+ add-scc-to-user privileged system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd
+ register: fluentd_output
+ failed_when: "fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr"
+ check_mode: no
+ when: fluentd_privileged.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd") == -1
+
+- name: "Check fluentd cluster-reader permissions"
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ get clusterrolebinding/cluster-readers -o jsonpath='{.userNames}'
+ register: fluentd_cluster_reader
+ check_mode: no
+ changed_when: no
+
+- name: "Set cluster-reader permissions for fluentd"
+ command: >
+ {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy
+ add-cluster-role-to-user cluster-reader system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd
+ register: fluentd2_output
+ failed_when: "fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr"
+ check_mode: no
+ when: fluentd_cluster_reader.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd") == -1
diff --git a/roles/openshift_logging/tasks/install_kibana.yaml b/roles/openshift_logging/tasks/install_kibana.yaml
new file mode 100644
index 000000000..de4b018dd
--- /dev/null
+++ b/roles/openshift_logging/tasks/install_kibana.yaml
@@ -0,0 +1,58 @@
+---
+- name: Check Kibana current replica count
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc/logging-kibana
+ -o jsonpath='{.spec.replicas}' -n {{openshift_logging_namespace}}
+ register: kibana_replica_count
+ when: not ansible_check_mode
+ ignore_errors: yes
+ changed_when: no
+
+- name: Check Kibana ops current replica count
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc/logging-kibana-ops
+ -o jsonpath='{.spec.replicas}' -n {{openshift_logging_namespace}}
+ register: kibana_ops_replica_count
+ when:
+ - not ansible_check_mode
+ - openshift_logging_use_ops
+ ignore_errors: yes
+ changed_when: no
+
+
+- name: Generate kibana deploymentconfig
+ template: src=kibana.j2 dest={{mktemp.stdout}}/templates/logging-kibana-dc.yaml
+ vars:
+ component: kibana
+ logging_component: kibana
+ deploy_name: "logging-{{component}}"
+ image: "{{openshift_logging_image_prefix}}logging-kibana:{{openshift_logging_image_version}}"
+ proxy_image: "{{openshift_logging_image_prefix}}logging-auth-proxy:{{openshift_logging_image_version}}"
+ es_host: logging-es
+ es_port: "{{openshift_logging_es_port}}"
+ kibana_cpu_limit: "{{openshift_logging_kibana_cpu_limit }}"
+ kibana_memory_limit: "{{openshift_logging_kibana_memory_limit }}"
+ kibana_proxy_cpu_limit: "{{openshift_logging_kibana_proxy_cpu_limit }}"
+ kibana_proxy_memory_limit: "{{openshift_logging_kibana_proxy_memory_limit }}"
+ replicas: "{{kibana_replica_count.stdout | default (0)}}"
+ check_mode: no
+ changed_when: no
+
+- name: Generate OPS kibana deploymentconfig
+ template: src=kibana.j2 dest={{mktemp.stdout}}/templates/logging-kibana-ops-dc.yaml
+ vars:
+ component: kibana-ops
+ logging_component: kibana
+ deploy_name: "logging-{{component}}"
+ image: "{{openshift_logging_image_prefix}}logging-kibana:{{openshift_logging_image_version}}"
+ proxy_image: "{{openshift_logging_image_prefix}}logging-auth-proxy:{{openshift_logging_image_version}}"
+ es_host: logging-es-ops
+ es_port: "{{openshift_logging_es_ops_port}}"
+ kibana_cpu_limit: "{{openshift_logging_kibana_ops_cpu_limit }}"
+ kibana_memory_limit: "{{openshift_logging_kibana_ops_memory_limit }}"
+ kibana_proxy_cpu_limit: "{{openshift_logging_kibana_ops_proxy_cpu_limit }}"
+ kibana_proxy_memory_limit: "{{openshift_logging_kibana_ops_proxy_memory_limit }}"
+ replicas: "{{kibana_ops_replica_count.stdout | default (0)}}"
+ when: openshift_logging_use_ops
+ check_mode: no
+ changed_when: no
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
new file mode 100644
index 000000000..a9699adb8
--- /dev/null
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -0,0 +1,56 @@
+---
+- name: Gather OpenShift Logging Facts
+ openshift_logging_facts:
+ oc_bin: "{{openshift.common.client_binary}}"
+ admin_kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
+ openshift_logging_namespace: "{{openshift_logging_namespace}}"
+ tags: logging_facts
+ check_mode: no
+
+- name: Validate Elasticsearch cluster size
+ fail: msg="The openshift_logging_es_cluster_size may not be scaled down by more than 1 from the number of Elasticsearch nodes already deployed"
+ when: "{{openshift_logging_facts.elasticsearch.deploymentconfigs | length - openshift_logging_es_cluster_size | abs > 1}}"
+
+- name: Install logging
+ include: "{{ role_path }}/tasks/install_{{ install_component }}.yaml"
+ when: openshift_hosted_logging_install | default(true) | bool
+ with_items:
+ - support
+ - elasticsearch
+ - kibana
+ - curator
+ - fluentd
+ loop_control:
+ loop_var: install_component
+
+- find: paths={{ mktemp.stdout }}/templates patterns=*.yaml
+ register: object_def_files
+ changed_when: no
+
+- slurp: src={{item}}
+ register: object_defs
+ with_items: "{{object_def_files.files | map(attribute='path') | list | sort}}"
+ changed_when: no
+
+- name: Create objects
+ include: oc_apply.yaml
+ vars:
+ - kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
+ - namespace: "{{ openshift_logging_namespace }}"
+ - file_name: "{{ file.source }}"
+ - file_content: "{{ file.content | b64decode | from_yaml }}"
+ with_items: "{{ object_defs.results }}"
+ loop_control:
+ loop_var: file
+ when: not ansible_check_mode
+
+- name: Printing out objects to create
+ debug: msg={{file.content | b64decode }}
+ with_items: "{{ object_defs.results }}"
+ loop_control:
+ loop_var: file
+ when: ansible_check_mode
+
+- name: Scaling up cluster
+ include: start_cluster.yaml
+ when: start_cluster | default(true) | bool
diff --git a/roles/openshift_logging/tasks/install_support.yaml b/roles/openshift_logging/tasks/install_support.yaml
new file mode 100644
index 000000000..da0bbb627
--- /dev/null
+++ b/roles/openshift_logging/tasks/install_support.yaml
@@ -0,0 +1,54 @@
+---
+# This is the base configuration for installing the other components
+- name: Check if logging project already exists
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project {{openshift_logging_namespace}} --no-headers
+ register: logging_project_result
+ ignore_errors: yes
+ when: not ansible_check_mode
+ changed_when: no
+
+- name: "Create logging project"
+ command: >
+ {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-project {{openshift_logging_namespace}}
+ when: not ansible_check_mode and "not found" in logging_project_result.stderr
+
+- name: Create logging cert directory
+ file: path={{openshift.common.config_base}}/logging state=directory mode=0755
+ changed_when: False
+ check_mode: no
+
+- include: generate_certs.yaml
+ vars:
+ generated_certs_dir: "{{openshift.common.config_base}}/logging"
+
+- name: Create temp directory for all our templates
+ file: path={{mktemp.stdout}}/templates state=directory mode=0755
+ changed_when: False
+ check_mode: no
+
+- include: generate_secrets.yaml
+ vars:
+ generated_certs_dir: "{{openshift.common.config_base}}/logging"
+
+- include: generate_configmaps.yaml
+
+- include: generate_services.yaml
+
+- name: Generate kibana-proxy oauth client
+ template: src=oauth-client.j2 dest={{mktemp.stdout}}/templates/oauth-client.yaml
+ vars:
+ secret: "{{oauth_secret}}"
+ when: oauth_secret is defined
+ check_mode: no
+ changed_when: no
+
+- include: generate_clusterroles.yaml
+
+- include: generate_rolebindings.yaml
+
+- include: generate_clusterrolebindings.yaml
+
+- include: generate_serviceaccounts.yaml
+
+- include: generate_routes.yaml
diff --git a/roles/openshift_logging/tasks/label_node.yaml b/roles/openshift_logging/tasks/label_node.yaml
new file mode 100644
index 000000000..aecb5d81b
--- /dev/null
+++ b/roles/openshift_logging/tasks/label_node.yaml
@@ -0,0 +1,29 @@
+---
+- command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get node {{host}}
+ -o jsonpath='{.metadata.labels.{{ label }}}'
+ register: label_value
+ failed_when: label_value.rc == 1 and 'exists' not in label_value.stderr
+ when: not ansible_check_mode
+ changed_when: no
+
+- command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig label node {{host}} {{label}}={{value}} --overwrite
+ register: label_result
+ failed_when: label_result.rc == 1 and 'exists' not in label_result.stderr
+ when:
+ - value is defined
+ - label_value.stdout is defined
+ - label_value.stdout != value
+ - unlabel is not defined or not unlabel
+ - not ansible_check_mode
+
+- command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig label node {{host}} {{label}}-
+ register: label_result
+ failed_when: label_result.rc == 1 and 'exists' not in label_result.stderr
+ when:
+ - unlabel is defined
+ - unlabel
+ - not ansible_check_mode
+ - label_value.stdout != ""
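label_node.yaml is written to be included once per host: pass `host` and `label` plus either a `value` to apply or `unlabel: True` to remove the label, as start_cluster.yaml and stop_cluster.yaml do later in this diff. A minimal stand-alone invocation, with an assumed host and label key, would be:

- include: label_node.yaml
  vars:
    host: node1.example.com                 # assumed node name
    label: logging-infra-fluentd            # assumed label key
    value: "true"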
diff --git a/roles/openshift_logging/tasks/main.yaml b/roles/openshift_logging/tasks/main.yaml
new file mode 100644
index 000000000..4c718805e
--- /dev/null
+++ b/roles/openshift_logging/tasks/main.yaml
@@ -0,0 +1,39 @@
+---
+- fail:
+ msg: Only one Fluentd node selector key/value pair should be provided
+ when: "{{ openshift_logging_fluentd_nodeselector.keys() | count }} > 1"
+
+- name: Create temp directory for doing work in
+ command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
+ check_mode: no
+ tags: logging_init
+
+- debug: msg="Created temp dir {{mktemp.stdout}}"
+
+- name: Copy the admin client config(s)
+ command: >
+ cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
+ changed_when: False
+ check_mode: no
+ tags: logging_init
+
+- include: "{{ role_path }}/tasks/install_logging.yaml"
+ when: openshift_logging_install_logging | default(false) | bool
+
+- include: "{{ role_path }}/tasks/upgrade_logging.yaml"
+ when: openshift_logging_upgrade_logging | default(false) | bool
+
+- include: "{{ role_path }}/tasks/delete_logging.yaml"
+ when:
+ - not openshift_logging_install_logging | default(false) | bool
+ - not openshift_logging_upgrade_logging | default(false) | bool
+
+- name: Delete temp directory
+ file:
+ name: "{{ mktemp.stdout }}"
+ state: absent
+ tags: logging_cleanup
+ changed_when: False
+ check_mode: no
diff --git a/roles/openshift_logging/tasks/oc_apply.yaml b/roles/openshift_logging/tasks/oc_apply.yaml
new file mode 100644
index 000000000..c362b7fca
--- /dev/null
+++ b/roles/openshift_logging/tasks/oc_apply.yaml
@@ -0,0 +1,29 @@
+---
+- name: Checking generation of {{file_content.kind}} {{file_content.metadata.name}}
+ shell: >
+ {{ openshift.common.client_binary }}
+ --config={{ kubeconfig }}
+ get {{file_content.kind}} {{file_content.metadata.name}}
+ -o jsonpath='{.metadata.resourceVersion}'
+ -n {{namespace}} || echo 0
+ register: generation_init
+ changed_when: no
+
+- name: Applying {{file_name}}
+ command: >
+ {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+ apply -f {{ file_name }}
+ -n {{ namespace }}
+ register: generation_apply
+ failed_when: "'error' in generation_apply.stderr"
+ changed_when: no
+
+- name: Determine change status of {{file_content.kind}} {{file_content.metadata.name}}
+ shell: >
+ {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+ get {{file_content.kind}} {{file_content.metadata.name}}
+ -o jsonpath='{.metadata.resourceVersion}'
+ -n {{namespace}} || echo 0
+ register: generation_changed
+ failed_when: "'error' in generation_changed.stderr"
+ changed_when: generation_changed.stdout | int > generation_init.stdout | int
diff --git a/roles/openshift_logging/tasks/procure_server_certs.yaml b/roles/openshift_logging/tasks/procure_server_certs.yaml
new file mode 100644
index 000000000..44dd5e894
--- /dev/null
+++ b/roles/openshift_logging/tasks/procure_server_certs.yaml
@@ -0,0 +1,52 @@
+---
+- name: Checking for {{ cert_info.procure_component }}.crt
+ stat: path="{{generated_certs_dir}}/{{ cert_info.procure_component }}.crt"
+ register: component_cert_file
+ check_mode: no
+
+- name: Checking for {{ cert_info.procure_component }}.key
+ stat: path="{{generated_certs_dir}}/{{ cert_info.procure_component }}.key"
+ register: component_key_file
+ check_mode: no
+
+- name: Trying to discover server cert variable name for {{ cert_info.procure_component }}
+ set_fact: procure_component_crt={{ lookup('env', '{{cert_info.procure_component}}' + '_crt') }}
+ when: cert_info.hostnames is undefined and {{ cert_info.procure_component }}_crt is defined and {{ cert_info.procure_component }}_key is defined
+ check_mode: no
+
+- name: Trying to discover the server key variable name for {{ cert_info.procure_component }}
+ set_fact: procure_component_key={{ lookup('env', '{{cert_info.procure_component}}' + '_key') }}
+ when: cert_info.hostnames is undefined and {{ cert_info.procure_component }}_crt is defined and {{ cert_info.procure_component }}_key is defined
+ check_mode: no
+
+- name: Creating signed server cert and key for {{ cert_info.procure_component }}
+ command: >
+ {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig ca create-server-cert
+ --key={{generated_certs_dir}}/{{cert_info.procure_component}}.key --cert={{generated_certs_dir}}/{{cert_info.procure_component}}.crt
+ --hostnames={{cert_info.hostnames|quote}} --signer-cert={{generated_certs_dir}}/ca.crt --signer-key={{generated_certs_dir}}/ca.key
+ --signer-serial={{generated_certs_dir}}/ca.serial.txt
+ check_mode: no
+ when:
+ - cert_info.hostnames is defined
+ - not component_key_file.stat.exists
+ - not component_cert_file.stat.exists
+
+- name: Copying server key for {{ cert_info.procure_component }} to generated certs directory
+ copy: content="{{procure_component_key}}" dest={{generated_certs_dir}}/{{cert_info.procure_component}}.key
+ check_mode: no
+ when:
+ - cert_info.hostnames is undefined
+ - "{{ cert_info.procure_component }}_crt is defined"
+ - "{{ cert_info.procure_component }}_key is defined"
+ - not component_key_file.stat.exists
+ - not component_cert_file.stat.exists
+
+- name: Copying server cert for {{ cert_info.procure_component }} to generated certs directory
+ copy: content="{{procure_component_crt}}" dest={{generated_certs_dir}}/{{cert_info.procure_component}}.crt
+ check_mode: no
+ when:
+ - cert_info.hostnames is undefined
+ - "{{ cert_info.procure_component }}_crt is defined"
+ - "{{ cert_info.procure_component }}_key is defined"
+ - not component_key_file.stat.exists
+ - not component_cert_file.stat.exists
diff --git a/roles/openshift_logging/tasks/scale.yaml b/roles/openshift_logging/tasks/scale.yaml
new file mode 100644
index 000000000..125d3b8af
--- /dev/null
+++ b/roles/openshift_logging/tasks/scale.yaml
@@ -0,0 +1,28 @@
+---
+- command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get {{object}}
+ -o jsonpath='{.spec.replicas}' -n {{openshift_logging_namespace}}
+ register: replica_count
+ failed_when: replica_count.rc == 1 and 'exists' not in replica_count.stderr
+ when: not ansible_check_mode
+ changed_when: no
+
+- command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig scale {{object}}
+ --replicas={{desired}} -n {{openshift_logging_namespace}}
+ register: scale_result
+ failed_when: scale_result.rc == 1 and 'exists' not in scale_result.stderr
+ when:
+ - not ansible_check_mode
+ - replica_count.stdout|int != desired
+
+- command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get {{object}} -n {{openshift_logging_namespace}} -o jsonpath='{.status.replicas}'
+ register: replica_counts
+ until: replica_counts.stdout|int == desired
+ retries: 30
+ delay: 10
+ when:
+ - not ansible_check_mode
+ - replica_count.stdout|int != desired
+ changed_when: no
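scale.yaml is likewise include-driven: it expects `object` (a dc/... name) and `desired`, scales only when the current replica count differs, and then polls `.status.replicas` until the rollout settles. An assumed stand-alone invocation:

- include: scale.yaml
  vars:
    object: dc/logging-kibana    # assumed DeploymentConfig name
    desired: 1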
diff --git a/roles/openshift_logging/tasks/start_cluster.yaml b/roles/openshift_logging/tasks/start_cluster.yaml
new file mode 100644
index 000000000..a96ad3f3a
--- /dev/null
+++ b/roles/openshift_logging/tasks/start_cluster.yaml
@@ -0,0 +1,104 @@
+---
+- command: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get node -o jsonpath='{.items[*].metadata.name}'
+ register: fluentd_hosts
+ when: "'--all' in openshift_logging_fluentd_hosts"
+ check_mode: no
+ changed_when: no
+
+- name: start fluentd
+ include: label_node.yaml
+ vars:
+ host: "{{fluentd_host}}"
+ label: "{{openshift_logging_fluentd_nodeselector.keys()[0]}}"
+ value: "{{openshift_logging_fluentd_nodeselector.values()[0]}}"
+ with_items: "{{(fluentd_hosts.stdout_lines is defined) | ternary(fluentd_hosts.stdout_lines, openshift_logging_fluentd_hosts)}}"
+ loop_control:
+ loop_var: fluentd_host
+
+- command: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es -o name -n {{openshift_logging_namespace}}
+ register: es_dc
+ check_mode: no
+ changed_when: no
+
+- name: start elasticsearch
+ include: scale.yaml
+ vars:
+ desired: 1
+ with_items: "{{es_dc.stdout_lines}}"
+ loop_control:
+ loop_var: object
+
+- command: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=kibana -o name -n {{openshift_logging_namespace}}
+ register: kibana_dc
+ check_mode: no
+ changed_when: no
+
+- name: start kibana
+ include: scale.yaml
+ vars:
+ desired: "{{ openshift_logging_kibana_replica_count | default (1) }}"
+ with_items: "{{kibana_dc.stdout_lines}}"
+ loop_control:
+ loop_var: object
+
+- command: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=curator -o name -n {{openshift_logging_namespace}}
+ register: curator_dc
+ check_mode: no
+ changed_when: no
+
+- name: start curator
+ include: scale.yaml
+ vars:
+ desired: 1
+ with_items: "{{curator_dc.stdout_lines}}"
+ loop_control:
+ loop_var: object
+
+- command: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es-ops -o name -n {{openshift_logging_namespace}}
+ register: es_dc
+ check_mode: no
+ changed_when: no
+
+- name: start elasticsearch-ops
+ include: scale.yaml
+ vars:
+ desired: 1
+ with_items: "{{es_dc.stdout_lines}}"
+ loop_control:
+ loop_var: object
+ when: openshift_logging_use_ops
+
+- command: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=kibana-ops -o name -n {{openshift_logging_namespace}}
+ register: kibana_dc
+ check_mode: no
+ changed_when: no
+
+- name: start kibana-ops
+ include: scale.yaml
+ vars:
+ desired: "{{ openshift_logging_kibana_ops_replica_count | default (1) }}"
+ with_items: "{{kibana_dc.stdout_lines}}"
+ loop_control:
+ loop_var: object
+ when: openshift_logging_use_ops
+
+- command: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=curator-ops -o name -n {{openshift_logging_namespace}}
+ register: curator_dc
+ check_mode: no
+ changed_when: no
+
+- name: start curator-ops
+ include: scale.yaml
+ vars:
+ desired: 1
+ with_items: "{{curator_dc.stdout_lines}}"
+ loop_control:
+ loop_var: object
+ when: openshift_logging_use_ops
diff --git a/roles/openshift_logging/tasks/stop_cluster.yaml b/roles/openshift_logging/tasks/stop_cluster.yaml
new file mode 100644
index 000000000..e44493e4d
--- /dev/null
+++ b/roles/openshift_logging/tasks/stop_cluster.yaml
@@ -0,0 +1,97 @@
+---
+- command: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get node -o jsonpath='{.items[*].metadata.name}'
+ register: fluentd_hosts
+ when: "'--all' in openshift_logging_fluentd_hosts"
+ changed_when: no
+
+- name: stop fluentd
+ include: label_node.yaml
+ vars:
+ host: "{{fluentd_host}}"
+ label: "{{openshift_logging_fluentd_nodeselector.keys()[0]}}"
+ unlabel: True
+ with_items: "{{(fluentd_hosts.stdout_lines is defined) | ternary(fluentd_hosts.stdout_lines, openshift_logging_fluentd_hosts)}}"
+ loop_control:
+ loop_var: fluentd_host
+
+- command: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es -o name -n {{openshift_logging_namespace}}
+ register: es_dc
+ changed_when: no
+
+- name: stop elasticsearch
+ include: scale.yaml
+ vars:
+ desired: 0
+ with_items: "{{es_dc.stdout_lines}}"
+ loop_control:
+ loop_var: object
+
+- command: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=kibana -o name -n {{openshift_logging_namespace}}
+ register: kibana_dc
+ changed_when: no
+
+- name: stop kibana
+ include: scale.yaml
+ vars:
+ desired: 0
+ with_items: "{{kibana_dc.stdout_lines}}"
+ loop_control:
+ loop_var: object
+
+- command: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=curator -o name -n {{openshift_logging_namespace}}
+ register: curator_dc
+ changed_when: no
+
+- name: stop curator
+ include: scale.yaml
+ vars:
+ desired: 0
+ with_items: "{{curator_dc.stdout_lines}}"
+ loop_control:
+ loop_var: object
+
+- command: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es-ops -o name -n {{openshift_logging_namespace}}
+ register: es_dc
+ changed_when: no
+
+- name: stop elasticsearch-ops
+ include: scale.yaml
+ vars:
+ desired: 0
+ with_items: "{{es_dc.stdout_lines}}"
+ loop_control:
+ loop_var: object
+ when: openshift_logging_use_ops
+
+- command: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=kibana-ops -o name -n {{openshift_logging_namespace}}
+ register: kibana_dc
+ changed_when: no
+
+- name: stop kibana-ops
+ include: scale.yaml
+ vars:
+ desired: 0
+ with_items: "{{kibana_dc.stdout_lines}}"
+ loop_control:
+ loop_var: object
+ when: openshift_logging_use_ops
+
+- command: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=curator-ops -o name -n {{openshift_logging_namespace}}
+ register: curator_dc
+ changed_when: no
+
+- name: stop curator-ops
+ include: scale.yaml
+ vars:
+ desired: 0
+ with_items: "{{curator_dc.stdout_lines}}"
+ loop_control:
+ loop_var: object
+ when: openshift_logging_use_ops
diff --git a/roles/openshift_logging/tasks/upgrade_logging.yaml b/roles/openshift_logging/tasks/upgrade_logging.yaml
new file mode 100644
index 000000000..a93463239
--- /dev/null
+++ b/roles/openshift_logging/tasks/upgrade_logging.yaml
@@ -0,0 +1,41 @@
+---
+- name: Stop the Cluster
+ include: stop_cluster.yaml
+
+- name: Upgrade logging
+ include: install_logging.yaml
+ vars:
+ start_cluster: False
+
+# start ES so that we can run migrate script
+- command: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get dc -l component=es -o name -n {{openshift_logging_namespace}}
+ register: es_dc
+ check_mode: no
+
+- name: start elasticsearch
+ include: scale.yaml
+ vars:
+ desired: 1
+ with_items: "{{es_dc.stdout_lines}}"
+ loop_control:
+ loop_var: object
+
+- command: >
+ {{ openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig get pods -n {{openshift_logging_namespace}} -l component=es -o jsonpath='{.items[?(@.status.phase == "Running")].metadata.name}'
+ register: running_pod
+ until: running_pod.stdout != ''
+ retries: 30
+ delay: 10
+ changed_when: no
+ check_mode: no
+
+- name: Run upgrade script
+ script: es_migration.sh {{openshift.common.config_base}}/logging/ca.crt {{openshift.common.config_base}}/logging/system.admin.key {{openshift.common.config_base}}/logging/system.admin.crt {{openshift_logging_es_host}} {{openshift_logging_es_port}} {{openshift_logging_namespace}}
+ register: script_output
+ changed_when:
+ - script_output.rc == 0
+ - script_output.stdout.find("skipping update_for_uuid") == -1 or script_output.stdout.find("skipping update_for_common_data_model") == -1
+
+- name: Start up rest of cluster
+ include: start_cluster.yaml
diff --git a/roles/openshift_logging/templates/clusterrole.j2 b/roles/openshift_logging/templates/clusterrole.j2
new file mode 100644
index 000000000..0d28db48e
--- /dev/null
+++ b/roles/openshift_logging/templates/clusterrole.j2
@@ -0,0 +1,21 @@
+apiVersion: v1
+kind: ClusterRole
+metadata:
+ name: {{obj_name}}
+rules:
+{% for rule in rules %}
+- resources:
+{% for kind in rule.resources %}
+ - {{ kind }}
+{% endfor %}
+ apiGroups:
+{% if rule.api_groups is defined %}
+{% for group in rule.api_groups %}
+ - {{ group }}
+{% endfor %}
+{% endif %}
+ verbs:
+{% for verb in rule.verbs %}
+ - {{ verb }}
+{% endfor %}
+{% endfor %}
diff --git a/roles/openshift_logging/templates/clusterrolebinding.j2 b/roles/openshift_logging/templates/clusterrolebinding.j2
new file mode 100644
index 000000000..2d25ff1fb
--- /dev/null
+++ b/roles/openshift_logging/templates/clusterrolebinding.j2
@@ -0,0 +1,24 @@
+apiVersion: v1
+kind: ClusterRoleBinding
+metadata:
+ name: {{obj_name}}
+{% if crb_usernames is defined %}
+userNames:
+{% for name in crb_usernames %}
+ - {{ name }}
+{% endfor %}
+{% endif %}
+{% if crb_groupnames is defined %}
+groupNames:
+{% for name in crb_groupnames %}
+ - {{ name }}
+{% endfor %}
+{% endif %}
+subjects:
+{% for sub in subjects %}
+ - kind: {{ sub.kind }}
+ name: {{ sub.name }}
+ namespace: {{sub.namespace}}
+{% endfor %}
+roleRef:
+ name: {{obj_name}}
diff --git a/roles/openshift_logging/templates/curator.j2 b/roles/openshift_logging/templates/curator.j2
new file mode 100644
index 000000000..d3b5d33a2
--- /dev/null
+++ b/roles/openshift_logging/templates/curator.j2
@@ -0,0 +1,97 @@
+apiVersion: "v1"
+kind: "DeploymentConfig"
+metadata:
+ name: "{{deploy_name}}"
+ labels:
+ provider: openshift
+ component: "{{component}}"
+ logging-infra: "{{logging_component}}"
+spec:
+ replicas: {{replicas|default(0)}}
+ selector:
+ provider: openshift
+ component: "{{component}}"
+ logging-infra: "{{logging_component}}"
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ type: Recreate
+ template:
+ metadata:
+ name: "{{deploy_name}}"
+ labels:
+ logging-infra: "{{logging_component}}"
+ provider: openshift
+ component: "{{component}}"
+ spec:
+ terminationGracePeriodSeconds: 600
+ serviceAccountName: aggregated-logging-curator
+ containers:
+ -
+ name: "curator"
+ image: {{image}}
+ imagePullPolicy: Always
+ resources:
+ limits:
+ cpu: "{{curator_cpu_limit}}"
+{% if curator_memory_limit is defined and curator_memory_limit is not none %}
+ memory: "{{curator_memory_limit}}"
+{% endif %}
+ env:
+ -
+ name: "K8S_HOST_URL"
+ value: "{{master_url}}"
+ -
+ name: "ES_HOST"
+ value: "{{es_host}}"
+ -
+ name: "ES_PORT"
+ value: "{{es_port}}"
+ -
+ name: "ES_CLIENT_CERT"
+ value: "/etc/curator/keys/cert"
+ -
+ name: "ES_CLIENT_KEY"
+ value: "/etc/curator/keys/key"
+ -
+ name: "ES_CA"
+ value: "/etc/curator/keys/ca"
+ -
+ name: "CURATOR_DEFAULT_DAYS"
+ value: "{{openshift_logging_curator_default_days}}"
+ -
+ name: "CURATOR_RUN_HOUR"
+ value: "{{openshift_logging_curator_run_hour}}"
+ -
+ name: "CURATOR_RUN_MINUTE"
+ value: "{{openshift_logging_curator_run_minute}}"
+ -
+ name: "CURATOR_RUN_TIMEZONE"
+ value: "{{openshift_logging_curator_run_timezone}}"
+ -
+ name: "CURATOR_SCRIPT_LOG_LEVEL"
+ value: "{{openshift_logging_curator_script_log_level}}"
+ -
+ name: "CURATOR_LOG_LEVEL"
+ value: "{{openshift_logging_curator_log_level}}"
+ volumeMounts:
+ - name: certs
+ mountPath: /etc/curator/keys
+ readOnly: true
+ - name: config
+ mountPath: /usr/curator/settings
+ readOnly: true
+ - name: elasticsearch-storage
+ mountPath: /elasticsearch/persistent
+ readOnly: true
+ volumes:
+ - name: certs
+ secret:
+ secretName: logging-curator
+ - name: config
+ configMap:
+ name: logging-curator
+ - name: elasticsearch-storage
+ emptyDir: {}
diff --git a/roles/openshift_logging/templates/elasticsearch.yml.j2 b/roles/openshift_logging/templates/elasticsearch.yml.j2
new file mode 100644
index 000000000..dad78b844
--- /dev/null
+++ b/roles/openshift_logging/templates/elasticsearch.yml.j2
@@ -0,0 +1,75 @@
+cluster:
+ name: ${CLUSTER_NAME}
+
+script:
+ inline: on
+ indexed: on
+
+index:
+ number_of_shards: 1
+ number_of_replicas: 0
+ auto_expand_replicas: 0-3
+ unassigned.node_left.delayed_timeout: 2m
+ translog:
+ flush_threshold_size: 256mb
+ flush_threshold_period: 5m
+
+node:
+ master: true
+ data: true
+
+network:
+ host: 0.0.0.0
+
+cloud:
+ kubernetes:
+ service: ${SERVICE_DNS}
+ namespace: ${NAMESPACE}
+
+discovery:
+ type: kubernetes
+ zen.ping.multicast.enabled: false
+
+gateway:
+ expected_master_nodes: ${NODE_QUORUM}
+ recover_after_nodes: ${RECOVER_AFTER_NODES}
+ expected_nodes: ${RECOVER_EXPECTED_NODES}
+ recover_after_time: ${RECOVER_AFTER_TIME}
+
+io.fabric8.elasticsearch.authentication.users: ["system.logging.kibana", "system.logging.fluentd", "system.logging.curator", "system.admin"]
+
+openshift.searchguard:
+ keystore.path: /etc/elasticsearch/secret/admin.jks
+ truststore.path: /etc/elasticsearch/secret/searchguard.truststore
+
+openshift.operations.allow_cluster_reader: {{allow_cluster_reader | default ('false')}}
+
+path:
+ data: /elasticsearch/persistent/${CLUSTER_NAME}/data
+ logs: /elasticsearch/${CLUSTER_NAME}/logs
+ work: /elasticsearch/${CLUSTER_NAME}/work
+ scripts: /elasticsearch/${CLUSTER_NAME}/scripts
+
+searchguard:
+ authcz.admin_dn:
+ - CN=system.admin,OU=OpenShift,O=Logging
+ config_index_name: ".searchguard.${HOSTNAME}"
+ ssl:
+ transport:
+ enabled: true
+ enforce_hostname_verification: false
+ keystore_type: JKS
+ keystore_filepath: /etc/elasticsearch/secret/searchguard.key
+ keystore_password: kspass
+ truststore_type: JKS
+ truststore_filepath: /etc/elasticsearch/secret/searchguard.truststore
+ truststore_password: tspass
+ http:
+ enabled: true
+ keystore_type: JKS
+ keystore_filepath: /etc/elasticsearch/secret/key
+ keystore_password: kspass
+ clientauth_mode: OPTIONAL
+ truststore_type: JKS
+ truststore_filepath: /etc/elasticsearch/secret/truststore
+ truststore_password: tspass
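The gateway settings above are fed from the NODE_QUORUM, RECOVER_AFTER_NODES and RECOVER_EXPECTED_NODES environment variables set in the es.j2 DeploymentConfig template that follows. The values themselves come from role defaults that are not part of this diff; the usual arithmetic for them, sketched here as an assumption rather than the role's actual defaults, is:

- name: Derive gateway settings from the cluster size (assumed arithmetic)
  set_fact:
    es_node_quorum: "{{ (openshift_logging_es_cluster_size | int / 2 + 1) | round(0, 'floor') | int }}"
    es_recover_after_nodes: "{{ openshift_logging_es_cluster_size | int - 1 }}"
    es_recover_expected_nodes: "{{ openshift_logging_es_cluster_size | int }}"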
diff --git a/roles/openshift_logging/templates/es.j2 b/roles/openshift_logging/templates/es.j2
new file mode 100644
index 000000000..291589690
--- /dev/null
+++ b/roles/openshift_logging/templates/es.j2
@@ -0,0 +1,105 @@
+apiVersion: "v1"
+kind: "DeploymentConfig"
+metadata:
+ name: "{{deploy_name}}"
+ labels:
+ provider: openshift
+ component: "{{component}}"
+ deployment: "{{deploy_name}}"
+ logging-infra: "{{logging_component}}"
+spec:
+ replicas: {{replicas|default(0)}}
+ selector:
+ provider: openshift
+ component: "{{component}}"
+ deployment: "{{deploy_name}}"
+ logging-infra: "{{logging_component}}"
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: "{{deploy_name}}"
+ labels:
+ logging-infra: "{{logging_component}}"
+ provider: openshift
+ component: "{{component}}"
+ deployment: "{{deploy_name}}"
+ spec:
+ terminationGracePeriodSeconds: 600
+ serviceAccountName: aggregated-logging-elasticsearch
+ securityContext:
+ supplementalGroups:
+ - {{openshift_logging_es_storage_group}}
+ containers:
+ -
+ name: "elasticsearch"
+ image: {{image}}
+ imagePullPolicy: Always
+ resources:
+ limits:
+ memory: "{{es_memory_limit}}"
+{% if es_cpu_limit is defined and es_cpu_limit is not none %}
+ cpu: "{{es_cpu_limit}}"
+{% endif %}
+ requests:
+ memory: "512Mi"
+ ports:
+ -
+ containerPort: 9200
+ name: "restapi"
+ -
+ containerPort: 9300
+ name: "cluster"
+ env:
+ -
+ name: "NAMESPACE"
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ -
+ name: "KUBERNETES_TRUST_CERT"
+ value: "true"
+ -
+ name: "SERVICE_DNS"
+ value: "logging-{{es_cluster_name}}-cluster"
+ -
+ name: "CLUSTER_NAME"
+ value: "logging-{{es_cluster_name}}"
+ -
+ name: "INSTANCE_RAM"
+ value: "{{openshift_logging_es_memory_limit}}"
+ -
+ name: "NODE_QUORUM"
+ value: "{{es_node_quorum | int}}"
+ -
+ name: "RECOVER_AFTER_NODES"
+ value: "{{es_recover_after_nodes}}"
+ -
+ name: "RECOVER_EXPECTED_NODES"
+ value: "{{es_recover_expected_nodes}}"
+ -
+ name: "RECOVER_AFTER_TIME"
+ value: "{{openshift_logging_es_recover_after_time}}"
+ volumeMounts:
+ - name: elasticsearch
+ mountPath: /etc/elasticsearch/secret
+ readOnly: true
+ - name: elasticsearch-config
+ mountPath: /usr/share/java/elasticsearch/config
+ readOnly: true
+ - name: elasticsearch-storage
+ mountPath: /elasticsearch/persistent
+ volumes:
+ - name: elasticsearch
+ secret:
+ secretName: logging-elasticsearch
+ - name: elasticsearch-config
+ configMap:
+ name: logging-elasticsearch
+ - name: elasticsearch-storage
+{% if pvc_claim is defined and pvc_claim | trim | length > 0 %}
+ persistentVolumeClaim:
+ claimName: {{pvc_claim}}
+{% else %}
+ emptyDir: {}
+{% endif %}
diff --git a/roles/openshift_logging/templates/fluentd.j2 b/roles/openshift_logging/templates/fluentd.j2
new file mode 100644
index 000000000..b6c91f8ed
--- /dev/null
+++ b/roles/openshift_logging/templates/fluentd.j2
@@ -0,0 +1,149 @@
+apiVersion: extensions/v1beta1
+kind: "DaemonSet"
+metadata:
+ name: "{{daemonset_name}}"
+ labels:
+ provider: openshift
+ component: "{{daemonset_component}}"
+ logging-infra: "{{daemonset_component}}"
+spec:
+ selector:
+ matchLabels:
+ provider: openshift
+ component: "{{daemonset_component}}"
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate:
+ minReadySeconds: 600
+ template:
+ metadata:
+ name: "{{daemonset_container_name}}"
+ labels:
+ logging-infra: "{{daemonset_component}}"
+ provider: openshift
+ component: "{{daemonset_component}}"
+ spec:
+ serviceAccountName: "{{daemonset_serviceAccount}}"
+ nodeSelector:
+ {{fluentd_nodeselector_key}}: "{{fluentd_nodeselector_value}}"
+ containers:
+ - name: "{{daemonset_container_name}}"
+ image: "{{openshift_logging_image_prefix}}{{daemonset_name}}:{{openshift_logging_image_version}}"
+ imagePullPolicy: Always
+ securityContext:
+ privileged: true
+ resources:
+ limits:
+ cpu: {{openshift_logging_fluentd_cpu_limit}}
+ memory: {{openshift_logging_fluentd_memory_limit}}
+ volumeMounts:
+ - name: runlogjournal
+ mountPath: /run/log/journal
+ - name: varlog
+ mountPath: /var/log
+ - name: varlibdockercontainers
+ mountPath: /var/lib/docker/containers
+ readOnly: true
+ - name: config
+ mountPath: /etc/fluent/configs.d/user
+ readOnly: true
+ - name: certs
+ mountPath: /etc/fluent/keys
+ readOnly: true
+ - name: dockerhostname
+ mountPath: /etc/docker-hostname
+ readOnly: true
+ - name: localtime
+ mountPath: /etc/localtime
+ readOnly: true
+ - name: dockercfg
+ mountPath: /etc/sysconfig/docker
+ readOnly: true
+ env:
+ - name: "K8S_HOST_URL"
+ value: "{{master_url}}"
+ - name: "ES_HOST"
+ value: "{{openshift_logging_es_host}}"
+ - name: "ES_PORT"
+ value: "{{openshift_logging_es_port}}"
+ - name: "ES_CLIENT_CERT"
+ value: "{{openshift_logging_es_client_cert}}"
+ - name: "ES_CLIENT_KEY"
+ value: "{{openshift_logging_es_client_key}}"
+ - name: "ES_CA"
+ value: "{{openshift_logging_es_ca}}"
+ - name: "OPS_HOST"
+ value: "{{ops_host}}"
+ - name: "OPS_PORT"
+ value: "{{ops_port}}"
+ - name: "OPS_CLIENT_CERT"
+ value: "{{openshift_logging_es_ops_client_cert}}"
+ - name: "OPS_CLIENT_KEY"
+ value: "{{openshift_logging_es_ops_client_key}}"
+ - name: "OPS_CA"
+ value: "{{openshift_logging_es_ops_ca}}"
+ - name: "ES_COPY"
+ value: "{{openshift_logging_fluentd_es_copy|lower}}"
+ - name: "ES_COPY_HOST"
+ value: "{{es_copy_host | default('')}}"
+ - name: "ES_COPY_PORT"
+ value: "{{es_copy_port | default('')}}"
+ - name: "ES_COPY_SCHEME"
+ value: "{{es_copy_scheme | default('https')}}"
+ - name: "ES_COPY_CLIENT_CERT"
+ value: "{{es_copy_client_cert | default('')}}"
+ - name: "ES_COPY_CLIENT_KEY"
+ value: "{{es_copy_client_key | default('')}}"
+ - name: "ES_COPY_CA"
+ value: "{{es_copy_ca | default('')}}"
+ - name: "ES_COPY_USERNAME"
+ value: "{{es_copy_username | default('')}}"
+ - name: "ES_COPY_PASSWORD"
+ value: "{{es_copy_password | default('')}}"
+ - name: "OPS_COPY_HOST"
+ value: "{{ops_copy_host | default('')}}"
+ - name: "OPS_COPY_PORT"
+ value: "{{ops_copy_port | default('')}}"
+ - name: "OPS_COPY_SCHEME"
+ value: "{{ops_copy_scheme | default('https')}}"
+ - name: "OPS_COPY_CLIENT_CERT"
+ value: "{{ops_copy_client_cert | default('')}}"
+ - name: "OPS_COPY_CLIENT_KEY"
+ value: "{{ops_copy_client_key | default('')}}"
+ - name: "OPS_COPY_CA"
+ value: "{{ops_copy_ca | default('')}}"
+ - name: "OPS_COPY_USERNAME"
+ value: "{{ops_copy_username | default('')}}"
+ - name: "OPS_COPY_PASSWORD"
+ value: "{{ops_copy_password | default('')}}"
+ - name: "USE_JOURNAL"
+ value: "{{openshift_logging_fluentd_use_journal|lower}}"
+ - name: "JOURNAL_SOURCE"
+ value: "{{fluentd_journal_source | default('')}}"
+ - name: "JOURNAL_READ_FROM_HEAD"
+ value: "{{openshift_logging_fluentd_journal_read_from_head|lower}}"
+ volumes:
+ - name: runlogjournal
+ hostPath:
+ path: /run/log/journal
+ - name: varlog
+ hostPath:
+ path: /var/log
+ - name: varlibdockercontainers
+ hostPath:
+ path: /var/lib/docker/containers
+ - name: config
+ configMap:
+ name: logging-fluentd
+ - name: certs
+ secret:
+ secretName: logging-fluentd
+ - name: dockerhostname
+ hostPath:
+ path: /etc/hostname
+ - name: localtime
+ hostPath:
+ path: /etc/localtime
+ - name: dockercfg
+ hostPath:
+ path: /etc/sysconfig/docker
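
fluentd.j2 schedules the DaemonSet only onto nodes carrying the label named by `fluentd_nodeselector_key`/`fluentd_nodeselector_value`. A sketch of the rendered selector, assuming the commonly used `logging-infra-fluentd: "true"` label (an assumption here, not something fixed by this template):

```
# Rendered nodeSelector, assuming fluentd_nodeselector_key=logging-infra-fluentd
# and fluentd_nodeselector_value="true" (illustrative values):
nodeSelector:
  logging-infra-fluentd: "true"
# Fluentd pods land only on nodes that already carry this label, e.g. applied with:
#   oc label node <node-name> logging-infra-fluentd=true
```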
diff --git a/roles/openshift_logging/templates/jks_pod.j2 b/roles/openshift_logging/templates/jks_pod.j2
new file mode 100644
index 000000000..8b1c74211
--- /dev/null
+++ b/roles/openshift_logging/templates/jks_pod.j2
@@ -0,0 +1,28 @@
+apiVersion: v1
+kind: Pod
+metadata:
+ labels:
+ logging-infra: support
+ generateName: jks-cert-gen-
+spec:
+ containers:
+ - name: jks-cert-gen
+ image: {{openshift_logging_image_prefix}}logging-deployer:{{openshift_logging_image_version}}
+ imagePullPolicy: Always
+ command: ["sh", "{{generated_certs_dir}}/generate-jks.sh"]
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - mountPath: {{generated_certs_dir}}
+ name: certmount
+ env:
+ - name: PROJECT
+ value: {{openshift_logging_namespace}}
+ - name: CERT_DIR
+ value: {{generated_certs_dir}}
+ restartPolicy: Never
+ serviceAccount: jks-generator
+ volumes:
+ - hostPath:
+ path: "{{generated_certs_dir}}"
+ name: certmount
diff --git a/roles/openshift_logging/templates/kibana.j2 b/roles/openshift_logging/templates/kibana.j2
new file mode 100644
index 000000000..1ec97701a
--- /dev/null
+++ b/roles/openshift_logging/templates/kibana.j2
@@ -0,0 +1,110 @@
+apiVersion: "v1"
+kind: "DeploymentConfig"
+metadata:
+ name: "{{deploy_name}}"
+ labels:
+ provider: openshift
+ component: "{{component}}"
+ logging-infra: "{{logging_component}}"
+spec:
+ replicas: {{replicas|default(0)}}
+ selector:
+ provider: openshift
+ component: "{{component}}"
+ logging-infra: "{{logging_component}}"
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ name: "{{deploy_name}}"
+ labels:
+ logging-infra: "{{logging_component}}"
+ provider: openshift
+ component: "{{component}}"
+ spec:
+ serviceAccountName: aggregated-logging-kibana
+ containers:
+ -
+ name: "kibana"
+ image: {{image}}
+ imagePullPolicy: Always
+{% if (kibana_memory_limit is defined and kibana_memory_limit is not none) or (kibana_cpu_limit is defined and kibana_cpu_limit is not none) %}
+ resources:
+ limits:
+{% if kibana_cpu_limit is not none %}
+ cpu: "{{kibana_cpu_limit}}"
+{% endif %}
+{% if kibana_memory_limit is not none %}
+ memory: "{{kibana_memory_limit}}"
+{% endif %}
+{% endif %}
+ env:
+ - name: "ES_HOST"
+ value: "{{es_host}}"
+ - name: "ES_PORT"
+ value: "{{es_port}}"
+ volumeMounts:
+ - name: kibana
+ mountPath: /etc/kibana/keys
+ readOnly: true
+ -
+ name: "kibana-proxy"
+ image: {{proxy_image}}
+ imagePullPolicy: Always
+{% if (kibana_proxy_memory_limit is defined and kibana_proxy_memory_limit is not none) or (kibana_proxy_cpu_limit is defined and kibana_proxy_cpu_limit is not none) %}
+ resources:
+ limits:
+{% if kibana_proxy_cpu_limit is not none %}
+ cpu: "{{kibana_proxy_cpu_limit}}"
+{% endif %}
+{% if kibana_proxy_memory_limit is not none %}
+ memory: "{{kibana_proxy_memory_limit}}"
+{% endif %}
+{% endif %}
+ ports:
+ -
+ name: "oaproxy"
+ containerPort: 3000
+ env:
+ -
+ name: "OAP_BACKEND_URL"
+ value: "http://localhost:5601"
+ -
+ name: "OAP_AUTH_MODE"
+ value: "oauth2"
+ -
+ name: "OAP_TRANSFORM"
+ value: "user_header,token_header"
+ -
+ name: "OAP_OAUTH_ID"
+ value: kibana-proxy
+ -
+ name: "OAP_MASTER_URL"
+ value: {{master_url}}
+ -
+ name: "OAP_PUBLIC_MASTER_URL"
+ value: {{public_master_url}}
+ -
+ name: "OAP_LOGOUT_REDIRECT"
+ value: {{public_master_url}}/console/logout
+ -
+ name: "OAP_MASTER_CA_FILE"
+ value: "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
+ -
+ name: "OAP_DEBUG"
+ value: "{{openshift_logging_kibana_proxy_debug}}"
+ volumeMounts:
+ - name: kibana-proxy
+ mountPath: /secret
+ readOnly: true
+ volumes:
+ - name: kibana
+ secret:
+ secretName: logging-kibana
+ - name: kibana-proxy
+ secret:
+ secretName: logging-kibana-proxy
diff --git a/roles/openshift_logging/templates/oauth-client.j2 b/roles/openshift_logging/templates/oauth-client.j2
new file mode 100644
index 000000000..41d3123cb
--- /dev/null
+++ b/roles/openshift_logging/templates/oauth-client.j2
@@ -0,0 +1,15 @@
+apiVersion: v1
+kind: OAuthClient
+metadata:
+ name: kibana-proxy
+ labels:
+ logging-infra: support
+secret: {{secret}}
+redirectURIs:
+- https://{{openshift_logging_kibana_hostname}}
+- https://{{openshift_logging_kibana_ops_hostname}}
+scopeRestrictions:
+- literals:
+ - user:info
+ - user:check-access
+ - user:list-projects
diff --git a/roles/openshift_logging/templates/pvc.j2 b/roles/openshift_logging/templates/pvc.j2
new file mode 100644
index 000000000..f19a3a750
--- /dev/null
+++ b/roles/openshift_logging/templates/pvc.j2
@@ -0,0 +1,27 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: {{obj_name}}
+ labels:
+ logging-infra: support
+{% if annotations is defined %}
+ annotations:
+{% for key,value in annotations.iteritems() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% endif %}
+spec:
+{% if pv_selector is defined and pv_selector is mapping %}
+ selector:
+ matchLabels:
+{% for key,value in pv_selector.iteritems() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% endif %}
+ accessModes:
+{% for mode in access_modes %}
+ - {{ mode }}
+{% endfor %}
+ resources:
+ requests:
+ storage: {{size}}
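
pvc.j2 reads `obj_name`, `access_modes`, and `size`, plus optional `annotations` and `pv_selector`. A sketch of a task that could render it, reusing the dynamic-provisioning annotation seen later in this patch (the destination path and values are illustrative):

```
# Minimal sketch of rendering pvc.j2; obj_name, size, and dest are illustrative.
- name: generate a persistent volume claim definition
  template:
    src: pvc.j2
    dest: /tmp/logging-es-pvc.yaml
  vars:
    obj_name: logging-es-1
    annotations:
      volume.alpha.kubernetes.io/storage-class: dynamic  # optional: request dynamic provisioning
    access_modes:
      - ReadWriteOnce
    size: 10Gi
```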
diff --git a/roles/openshift_logging/templates/rolebinding.j2 b/roles/openshift_logging/templates/rolebinding.j2
new file mode 100644
index 000000000..fcd4e87cc
--- /dev/null
+++ b/roles/openshift_logging/templates/rolebinding.j2
@@ -0,0 +1,14 @@
+apiVersion: v1
+kind: RoleBinding
+metadata:
+ name: {{obj_name}}
+roleRef:
+{% if roleRef.kind is defined %}
+ kind: {{ roleRef.kind }}
+{% endif %}
+ name: {{ roleRef.name }}
+subjects:
+{% for sub in subjects %}
+ - kind: {{ sub.kind }}
+ name: {{ sub.name }}
+{% endfor %}
diff --git a/roles/openshift_logging/templates/route_reencrypt.j2 b/roles/openshift_logging/templates/route_reencrypt.j2
new file mode 100644
index 000000000..8be30a2c4
--- /dev/null
+++ b/roles/openshift_logging/templates/route_reencrypt.j2
@@ -0,0 +1,25 @@
+apiVersion: "v1"
+kind: "Route"
+metadata:
+ name: "{{obj_name}}"
+{% if labels is defined%}
+ labels:
+{% for key, value in labels.iteritems() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% endif %}
+spec:
+ host: {{ route_host }}
+ tls:
+ caCertificate: |
+{% for line in tls_ca_cert.split('\n') %}
+ {{ line }}
+{% endfor %}
+ destinationCACertificate: |
+{% for line in tls_dest_ca_cert.split('\n') %}
+ {{ line }}
+{% endfor %}
+ termination: reencrypt
+ to:
+ kind: Service
+ name: {{ service_name }}
diff --git a/roles/openshift_logging/templates/secret.j2 b/roles/openshift_logging/templates/secret.j2
new file mode 100644
index 000000000..d73bae9c4
--- /dev/null
+++ b/roles/openshift_logging/templates/secret.j2
@@ -0,0 +1,9 @@
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{secret_name}}
+type: Opaque
+data:
+{% for s in secrets %}
+ {{s.key}}: {{s.value | b64encode}}
+{% endfor %}
diff --git a/roles/openshift_logging/templates/service.j2 b/roles/openshift_logging/templates/service.j2
new file mode 100644
index 000000000..6c4ec0c76
--- /dev/null
+++ b/roles/openshift_logging/templates/service.j2
@@ -0,0 +1,28 @@
+apiVersion: "v1"
+kind: "Service"
+metadata:
+ name: "{{obj_name}}"
+{% if labels is defined%}
+ labels:
+{% for key, value in labels.iteritems() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% endif %}
+spec:
+ ports:
+{% for port in ports %}
+ -
+{% for key, value in port.iteritems() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% if port.targetPort is undefined %}
+ clusterIP: "None"
+{% endif %}
+{% endfor %}
+{% if service_targetPort is defined %}
+ targetPort: {{service_targetPort}}
+{% endif %}
+ selector:
+ {% for key, value in selector.iteritems() %}
+ {{key}}: {{value}}
+ {% endfor %}
diff --git a/roles/openshift_logging/templates/serviceaccount.j2 b/roles/openshift_logging/templates/serviceaccount.j2
new file mode 100644
index 000000000..b22acc594
--- /dev/null
+++ b/roles/openshift_logging/templates/serviceaccount.j2
@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{obj_name}}
+{% if labels is defined%}
+ labels:
+{% for key, value in labels.iteritems() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% endif %}
+{% if secrets is defined %}
+secrets:
+{% for name in secrets %}
+- name: {{ name }}
+{% endfor %}
+{% endif %}
diff --git a/roles/openshift_logging/templates/signing.conf.j2 b/roles/openshift_logging/templates/signing.conf.j2
new file mode 100644
index 000000000..727cde4c9
--- /dev/null
+++ b/roles/openshift_logging/templates/signing.conf.j2
@@ -0,0 +1,103 @@
+# Simple Signing CA
+
+# The [default] section contains global constants that can be referred to from
+# the entire configuration file. It may also hold settings pertaining to more
+# than one openssl command.
+
+[ default ]
+dir = {{top_dir}} # Top dir
+
+# The next part of the configuration file is used by the openssl req command.
+# It defines the CA's key pair, its DN, and the desired extensions for the CA
+# certificate.
+
+[ req ]
+default_bits = 2048 # RSA key size
+encrypt_key = yes # Protect private key
+default_md = sha1 # MD to use
+utf8 = yes # Input is UTF-8
+string_mask = utf8only # Emit UTF-8 strings
+prompt = no # Don't prompt for DN
+distinguished_name = ca_dn # DN section
+req_extensions = ca_reqext # Desired extensions
+
+[ ca_dn ]
+0.domainComponent = "io"
+1.domainComponent = "openshift"
+organizationName = "OpenShift Origin"
+organizationalUnitName = "Logging Signing CA"
+commonName = "Logging Signing CA"
+
+[ ca_reqext ]
+keyUsage = critical,keyCertSign,cRLSign
+basicConstraints = critical,CA:true,pathlen:0
+subjectKeyIdentifier = hash
+
+# The remainder of the configuration file is used by the openssl ca command.
+# The CA section defines the locations of CA assets, as well as the policies
+# applying to the CA.
+
+[ ca ]
+default_ca = signing_ca # The default CA section
+
+[ signing_ca ]
+certificate = $dir/ca.crt # The CA cert
+private_key = $dir/ca.key # CA private key
+new_certs_dir = $dir/ # Certificate archive
+serial = $dir/ca.serial.txt # Serial number file
+crlnumber = $dir/ca.crl.srl # CRL number file
+database = $dir/ca.db # Index file
+unique_subject = no # Require unique subject
+default_days = 730 # How long to certify for
+default_md = sha1 # MD to use
+policy = any_pol # Default naming policy
+email_in_dn = no # Add email to cert DN
+preserve = no # Keep passed DN ordering
+name_opt = ca_default # Subject DN display options
+cert_opt = ca_default # Certificate display options
+copy_extensions = copy # Copy extensions from CSR
+x509_extensions = client_ext # Default cert extensions
+default_crl_days = 7 # How long before next CRL
+crl_extensions = crl_ext # CRL extensions
+
+# Naming policies control which parts of a DN end up in the certificate and
+# under what circumstances certification should be denied.
+
+[ match_pol ]
+domainComponent = match # Must match 'simple.org'
+organizationName = match # Must match 'Simple Inc'
+organizationalUnitName = optional # Included if present
+commonName = supplied # Must be present
+
+[ any_pol ]
+domainComponent = optional
+countryName = optional
+stateOrProvinceName = optional
+localityName = optional
+organizationName = optional
+organizationalUnitName = optional
+commonName = optional
+emailAddress = optional
+
+# Certificate extensions define what types of certificates the CA is able to
+# create.
+
+[ client_ext ]
+keyUsage = critical,digitalSignature,keyEncipherment
+basicConstraints = CA:false
+extendedKeyUsage = clientAuth
+subjectKeyIdentifier = hash
+authorityKeyIdentifier = keyid
+
+[ server_ext ]
+keyUsage = critical,digitalSignature,keyEncipherment
+basicConstraints = CA:false
+extendedKeyUsage = serverAuth,clientAuth
+subjectKeyIdentifier = hash
+authorityKeyIdentifier = keyid
+
+# CRL extensions exist solely to point to the CA certificate that has issued
+# the CRL.
+
+[ crl_ext ]
+authorityKeyIdentifier = keyid
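
The comments in signing.conf.j2 describe the intended `openssl req`/`openssl ca` workflow; the `client_ext` and `server_ext` sections are selected at signing time. A hedged sketch, outside the role itself, of signing a server CSR against this CA (the file names under `top_dir` are illustrative):

```
# Not part of the role -- an illustrative task showing how the rendered signing.conf
# could be used to sign a server CSR with the server_ext profile.
- name: sign a server certificate with the logging signing CA
  command: >
    openssl ca -batch
    -config {{ top_dir }}/signing.conf
    -extensions server_ext
    -in {{ top_dir }}/kibana.csr
    -out {{ top_dir }}/kibana.crt
```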
diff --git a/roles/openshift_logging/vars/main.yaml b/roles/openshift_logging/vars/main.yaml
new file mode 100644
index 000000000..11662c446
--- /dev/null
+++ b/roles/openshift_logging/vars/main.yaml
@@ -0,0 +1,8 @@
+---
+openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
+es_node_quorum: "{{openshift_logging_es_cluster_size/2 + 1}}"
+es_recover_after_nodes: "{{openshift_logging_es_cluster_size - 1}}"
+es_recover_expected_nodes: "{{openshift_logging_es_cluster_size}}"
+es_ops_node_quorum: "{{openshift_logging_es_ops_cluster_size/2 + 1}}"
+es_ops_recover_after_nodes: "{{openshift_logging_es_ops_cluster_size - 1}}"
+es_ops_recover_expected_nodes: "{{openshift_logging_es_ops_cluster_size}}"
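
The quorum and recovery thresholds above are plain arithmetic on the configured cluster size. A worked example, assuming a three-node Elasticsearch cluster (the `| int` cast applied in es.j2 drops any fractional part):

```
# Assuming openshift_logging_es_cluster_size: 3
#   es_node_quorum            -> 2   (a majority of the 3 nodes)
#   es_recover_after_nodes    -> 2   (3 - 1)
#   es_recover_expected_nodes -> 3
```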
diff --git a/roles/openshift_master/meta/main.yml b/roles/openshift_master/meta/main.yml
index 7457e4378..56af0cf36 100644
--- a/roles/openshift_master/meta/main.yml
+++ b/roles/openshift_master/meta/main.yml
@@ -11,4 +11,34 @@ galaxy_info:
- 7
categories:
- cloud
-dependencies: []
+dependencies:
+- role: openshift_master_facts
+- role: openshift_hosted_facts
+- role: openshift_master_certificates
+- role: openshift_etcd_client_certificates
+ etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}"
+ etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
+ etcd_cert_prefix: "master.etcd-"
+ when: groups.oo_etcd_to_config | default([]) | length != 0
+- role: openshift_clock
+- role: openshift_cloud_provider
+- role: openshift_builddefaults
+- role: openshift_buildoverrides
+- role: os_firewall
+ os_firewall_allow:
+ - service: api server https
+ port: "{{ openshift.master.api_port }}/tcp"
+ - service: api controllers https
+ port: "{{ openshift.master.controllers_port }}/tcp"
+ - service: skydns tcp
+ port: "{{ openshift.master.dns_port }}/tcp"
+ - service: skydns udp
+ port: "{{ openshift.master.dns_port }}/udp"
+- role: os_firewall
+ os_firewall_allow:
+ - service: etcd embedded
+ port: 4001/tcp
+ when: groups.oo_etcd_to_config | default([]) | length == 0
+- role: nickhammond.logrotate
+- role: nuage_master
+ when: openshift.common.use_nuage | bool
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index 81546c829..fcb8125e9 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -123,7 +123,7 @@ kubernetesMasterConfig:
keyFile: master.proxy-client.key
schedulerArguments: {{ openshift_master_scheduler_args | default(None) | to_padded_yaml( level=3 ) }}
schedulerConfigFile: {{ openshift_master_scheduler_conf }}
- servicesNodePortRange: ""
+ servicesNodePortRange: "{{ openshift_node_port_range | default("") }}"
servicesSubnet: {{ openshift.common.portal_net }}
staticNodeNames: {{ openshift_node_ips | default([], true) }}
{% endif %}
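
With this change the node port range is no longer pinned to an empty string but comes from the inventory variable `openshift_node_port_range` when it is set. A sketch of setting it, with the Kubernetes default range used purely as an illustration:

```
# group_vars for the OSEv3 group (or the [OSEv3:vars] section of an INI inventory);
# the range value below is illustrative.
openshift_node_port_range: "30000-32767"
```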
diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml
index e9b7de330..4620dd877 100644
--- a/roles/openshift_master_certificates/tasks/main.yml
+++ b/roles/openshift_master_certificates/tasks/main.yml
@@ -30,7 +30,6 @@
| oo_collect(attribute='stat.exists')
| list)) }}"
-
- name: Ensure the generated_configs directory present
file:
path: "{{ openshift_master_generated_config_dir }}"
@@ -39,30 +38,50 @@
when: master_certs_missing | bool and inventory_hostname != openshift_ca_host
delegate_to: "{{ openshift_ca_host }}"
-- file:
- src: "{{ openshift_master_config_dir }}/{{ item }}"
- dest: "{{ openshift_master_generated_config_dir }}/{{ item }}"
- state: hard
- with_items:
- - ca.crt
- - ca.key
- - ca.serial.txt
- when: master_certs_missing | bool and inventory_hostname != openshift_ca_host
- delegate_to: "{{ openshift_ca_host }}"
-
-- name: Create the master certificates if they do not already exist
+- name: Create the master server certificate
command: >
- {{ openshift.common.client_binary }} adm create-master-certs
+ {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm ca create-server-cert
{% for named_ca_certificate in openshift.master.named_certificates | default([]) | oo_collect('cafile') %}
--certificate-authority {{ named_ca_certificate }}
{% endfor %}
- --hostnames={{ openshift.common.all_hostnames | join(',') }}
- --master={{ openshift.master.api_url }}
- --public-master={{ openshift.master.public_api_url }}
- --cert-dir={{ openshift_master_generated_config_dir }}
+ --hostnames={{ hostvars[item].openshift.common.all_hostnames | join(',') }}
+ --cert={{ openshift_generated_configs_dir }}/master-{{ hostvars[item].openshift.common.hostname }}/master.server.crt
+ --key={{ openshift_generated_configs_dir }}/master-{{ hostvars[item].openshift.common.hostname }}/master.server.key
+ --signer-cert={{ openshift_ca_cert }}
+ --signer-key={{ openshift_ca_key }}
+ --signer-serial={{ openshift_ca_serial }}
--overwrite=false
- when: master_certs_missing | bool and inventory_hostname != openshift_ca_host
+ with_items: "{{ hostvars
+ | oo_select_keys(groups['oo_masters_to_config'])
+ | oo_collect(attribute='inventory_hostname', filters={'master_certs_missing':True})
+ | difference([openshift_ca_host])}}"
+ delegate_to: "{{ openshift_ca_host }}"
+ run_once: true
+
+- name: Generate the master client config
+ command: >
+ {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm create-api-client-config
+ {% for named_ca_certificate in openshift.master.named_certificates | default([]) | oo_collect('cafile') %}
+ --certificate-authority {{ named_ca_certificate }}
+ {% endfor %}
+ --certificate-authority={{ openshift_ca_cert }}
+ --client-dir={{ openshift_generated_configs_dir }}/master-{{ hostvars[item].openshift.common.hostname }}
+ --groups=system:masters,system:openshift-master
+ --master={{ openshift.master.api_url }}
+ --public-master={{ openshift.master.public_api_url }}
+ --signer-cert={{ openshift_ca_cert }}
+ --signer-key={{ openshift_ca_key }}
+ --signer-serial={{ openshift_ca_serial }}
+ --user=system:openshift-master
+ --basename=openshift-master
+ args:
+ creates: "{{ openshift_generated_configs_dir }}/master-{{ hostvars[item].openshift.common.hostname }}/openshift-master.kubeconfig"
+ with_items: "{{ hostvars
+ | oo_select_keys(groups['oo_masters_to_config'])
+ | oo_collect(attribute='inventory_hostname', filters={'master_certs_missing':True})
+ | difference([openshift_ca_host])}}"
delegate_to: "{{ openshift_ca_host }}"
+ run_once: true
- file:
src: "{{ openshift_master_config_dir }}/{{ item }}"
@@ -86,7 +105,7 @@
- name: Create local temp directory for syncing certs
local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
- register: g_master_mktemp
+ register: g_master_certs_mktemp
changed_when: False
when: master_certs_missing | bool
delegate_to: localhost
@@ -104,7 +123,7 @@
- name: Retrieve the master cert tarball from the master
fetch:
src: "{{ openshift_master_generated_config_dir }}.tgz"
- dest: "{{ g_master_mktemp.stdout }}/"
+ dest: "{{ g_master_certs_mktemp.stdout }}/"
flat: yes
fail_on_missing: yes
validate_checksum: yes
@@ -119,11 +138,11 @@
- name: Unarchive the tarball on the master
unarchive:
- src: "{{ g_master_mktemp.stdout }}/{{ openshift_master_cert_subdir }}.tgz"
+ src: "{{ g_master_certs_mktemp.stdout }}/{{ openshift_master_cert_subdir }}.tgz"
dest: "{{ openshift_master_config_dir }}"
when: master_certs_missing | bool and inventory_hostname != openshift_ca_host
-- file: name={{ g_master_mktemp.stdout }} state=absent
+- file: name={{ g_master_certs_mktemp.stdout }} state=absent
changed_when: False
when: master_certs_missing | bool
delegate_to: localhost
diff --git a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py
index 4f7461827..29a59a0d3 100644
--- a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py
+++ b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_predicates.py
@@ -40,10 +40,10 @@ class LookupModule(LookupBase):
# pylint: disable=line-too-long
raise AnsibleError("Either OpenShift needs to be installed or openshift_release needs to be specified")
if deployment_type == 'origin':
- if short_version not in ['1.1', '1.2', '1.3', '1.4']:
+ if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '1.6', 'latest']:
raise AnsibleError("Unknown short_version %s" % short_version)
elif deployment_type == 'openshift-enterprise':
- if short_version not in ['3.1', '3.2', '3.3', '3.4']:
+ if short_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6', 'latest']:
raise AnsibleError("Unknown short_version %s" % short_version)
else:
raise AnsibleError("Unknown deployment_type %s" % deployment_type)
diff --git a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py
index 7087ff03c..36022597f 100644
--- a/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py
+++ b/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py
@@ -45,10 +45,10 @@ class LookupModule(LookupBase):
raise AnsibleError("Either OpenShift needs to be installed or openshift_release needs to be specified")
if deployment_type == 'origin':
- if short_version not in ['1.1', '1.2', '1.3', '1.4']:
+ if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '1.6', 'latest']:
raise AnsibleError("Unknown short_version %s" % short_version)
elif deployment_type == 'openshift-enterprise':
- if short_version not in ['3.1', '3.2', '3.3', '3.4']:
+ if short_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6', 'latest']:
raise AnsibleError("Unknown short_version %s" % short_version)
else:
raise AnsibleError("Unknown deployment_type %s" % deployment_type)
diff --git a/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py b/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py
index c95356908..07bac6826 100644
--- a/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py
+++ b/roles/openshift_master_facts/test/openshift_master_facts_default_predicates_tests.py
@@ -65,7 +65,11 @@ TEST_VARS = [
('1.3', 'origin', DEFAULT_PREDICATES_1_3),
('3.3', 'openshift-enterprise', DEFAULT_PREDICATES_1_3),
('1.4', 'origin', DEFAULT_PREDICATES_1_4),
- ('3.4', 'openshift-enterprise', DEFAULT_PREDICATES_1_4)
+ ('3.4', 'openshift-enterprise', DEFAULT_PREDICATES_1_4),
+ ('1.5', 'origin', DEFAULT_PREDICATES_1_4),
+ ('3.5', 'openshift-enterprise', DEFAULT_PREDICATES_1_4),
+ ('1.6', 'origin', DEFAULT_PREDICATES_1_4),
+ ('3.6', 'openshift-enterprise', DEFAULT_PREDICATES_1_4),
]
diff --git a/roles/openshift_master_facts/vars/main.yml b/roles/openshift_master_facts/vars/main.yml
index fa745eb66..bf6d2402d 100644
--- a/roles/openshift_master_facts/vars/main.yml
+++ b/roles/openshift_master_facts/vars/main.yml
@@ -2,24 +2,3 @@
openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
openshift_master_config_file: "{{ openshift_master_config_dir }}/master-config.yaml"
openshift_master_scheduler_conf: "{{ openshift_master_config_dir }}/scheduler.json"
-
-builddefaults_yaml:
- BuildDefaults:
- configuration:
- apiVersion: v1
- kind: BuildDefaultsConfig
- gitHTTPProxy: "{{ openshift.master.builddefaults_git_http_proxy | default(omit, true) }}"
- gitHTTPSProxy: "{{ openshift.master.builddefaults_git_https_proxy | default(omit, true) }}"
- env:
- - name: HTTP_PROXY
- value: "{{ openshift.master.builddefaults_http_proxy | default(omit, true) }}"
- - name: HTTPS_PROXY
- value: "{{ openshift.master.builddefaults_https_proxy | default(omit, true) }}"
- - name: NO_PROXY
- value: "{{ openshift.master.builddefaults_no_proxy | default(omit, true) | join(',') }}"
- - name: http_proxy
- value: "{{ openshift.master.builddefaults_http_proxy | default(omit, true) }}"
- - name: https_proxy
- value: "{{ openshift.master.builddefaults_https_proxy | default(omit, true) }}"
- - name: no_proxy
- value: "{{ openshift.master.builddefaults_no_proxy | default(omit, true) | join(',') }}"
diff --git a/roles/openshift_metrics/README.md b/roles/openshift_metrics/README.md
index 015a673a8..a61b0db5e 100644
--- a/roles/openshift_metrics/README.md
+++ b/roles/openshift_metrics/README.md
@@ -5,33 +5,85 @@ OpenShift Metrics Installation
Requirements
------------
+This role has the following dependencies:
+
+- Java is required on the control node to generate keystores for the Java components
+- httpd-tools is required on the control node to generate various passwords for the metrics components
+
+The following variables need to be set and will be validated:
+
+- `openshift_metrics_hawkular_hostname`: hostname used on the hawkular metrics route.
+
+- `openshift_metrics_project`: project (i.e. namespace) where the components will be
+ deployed.
-* Ansible 2.2
-* It requires subdomain fqdn to be set.
-* If persistence is enabled, then it also requires NFS.
Role Variables
--------------
-From this role:
+For default values, see [`defaults/main.yaml`](defaults/main.yaml).
+
+- `openshift_metrics_image_prefix`: Specify prefix for metrics components; e.g. for
+ "openshift/origin-metrics-deployer:v1.1", set prefix "openshift/origin-".
+
+- `openshift_metrics_image_version`: Specify version for metrics components; e.g. for
+ "openshift/origin-metrics-deployer:v1.1", set version "v1.1".
+
+- `openshift_metrics_hawkular_cert`: The certificate used for re-encrypting the route
+  to Hawkular metrics. The certificate must contain the hostname used by the route.
+  The default router certificate will be used if unspecified.
+
+- `openshift_metrics_hawkular_key`: The key used with the Hawkular certificate.
+
+- `openshift_metrics_hawkular_ca`: An optional certificate used to sign the Hawkular certificate.
+
+- `openshift_metrics_hawkular_replicas`: The number of replicas for Hawkular metrics.
+
+- `openshift_metrics_cassandra_replicas`: The number of Cassandra nodes to deploy for the
+ initial cluster.
+
+- `openshift_metrics_cassandra_storage_type`: Use `emptydir` for ephemeral storage (for
+ testing), `pv` to use persistent volumes (which need to be created before the
+ installation) or `dynamic` for dynamic persistent volumes.
-| Name | Default value | |
-|-------------------------------------------------|-----------------------|-------------------------------------------------------------|
-| openshift_hosted_metrics_deploy | `False` | If metrics should be deployed |
-| openshift_hosted_metrics_public_url | null | Hawkular metrics public url |
-| openshift_hosted_metrics_storage_nfs_directory | `/exports` | Root export directory. |
-| openshift_hosted_metrics_storage_volume_name | `metrics` | Metrics volume within openshift_hosted_metrics_volume_dir |
-| openshift_hosted_metrics_storage_volume_size | `10Gi` | Metrics volume size |
-| openshift_hosted_metrics_storage_nfs_options | `*(rw,root_squash)` | NFS options for configured exports. |
-| openshift_hosted_metrics_duration | `7` | Metrics query duration |
-| openshift_hosted_metrics_resolution | `10s` | Metrics resolution |
+- `openshift_metrics_cassandra_pv_prefix`: The prefix used for the persistent volume claims
+  created for Cassandra; a serial number starting from 1 is appended to each
+  claim name.
+- `openshift_metrics_cassandra_pv_size`: The persistent volume size for each of the
+ Cassandra nodes.
+
+- `openshift_metrics_heapster_standalone`: Deploy only heapster, without the Hawkular Metrics and
+ Cassandra components.
+
+- `openshift_metrics_heapster_allowed_users`: A comma-separated list of CNs to accept. By
+ default, this is set to allow the OpenShift service proxy to connect. If you
+ override this, make sure to add `system:master-proxy` to the list in order to
+ allow horizontal pod autoscaling to function properly.
+
+- `openshift_metrics_startup_timeout`: How long, in seconds, to wait for Hawkular Metrics
+  and Heapster to start up before attempting a restart.
+
+- `openshift_metrics_duration`: How many days metrics should be stored for.
+
+- `openshift_metrics_resolution`: How often metrics should be gathered.
+
+## Additional variables to control resource limits
+CPU and memory limits and requests can be set for each metrics component (hawkular,
+cassandra, heapster) through the corresponding role variable:
+```
+openshift_metrics_<COMPONENT>_(limits|requests)_(memory|cpu): <VALUE>
+```
+For example:
+```
+openshift_metrics_cassandra_limits_memory: 1G
+openshift_metrics_hawkular_requests_cpu: 100
+```
Dependencies
------------
openshift_facts
-openshift_examples
-openshift_master_facts
+
Example Playbook
----------------
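
A minimal sketch of an inventory fragment pulling together the variables described in this README; the hostname and storage values are illustrative, and anything omitted falls back to defaults/main.yaml:

```
# Illustrative values only -- adjust the hostname and storage settings per cluster.
openshift_metrics_hawkular_hostname: hawkular-metrics.apps.example.com
openshift_metrics_project: openshift-infra
openshift_metrics_cassandra_storage_type: pv
openshift_metrics_cassandra_pv_size: 10Gi
openshift_metrics_cassandra_replicas: 1
openshift_metrics_duration: 7
openshift_metrics_resolution: 15s
```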
diff --git a/roles/openshift_metrics/defaults/main.yaml b/roles/openshift_metrics/defaults/main.yaml
new file mode 100644
index 000000000..b99adf779
--- /dev/null
+++ b/roles/openshift_metrics/defaults/main.yaml
@@ -0,0 +1,48 @@
+---
+openshift_metrics_start_cluster: True
+openshift_metrics_install_metrics: True
+openshift_metrics_image_prefix: docker.io/openshift/origin-
+openshift_metrics_image_version: latest
+openshift_metrics_startup_timeout: 500
+
+openshift_metrics_hawkular_replicas: 1
+openshift_metrics_hawkular_limits_memory: 2.5G
+openshift_metrics_hawkular_limits_cpu: null
+openshift_metrics_hawkular_requests_memory: 1.5G
+openshift_metrics_hawkular_requests_cpu: null
+openshift_metrics_hawkular_cert: ""
+openshift_metrics_hawkular_key: ""
+openshift_metrics_hawkular_ca: ""
+
+openshift_metrics_cassandra_replicas: 1
+openshift_metrics_cassandra_storage_type: emptydir
+openshift_metrics_cassandra_pv_size: 10Gi
+openshift_metrics_cassandra_limits_memory: 2G
+openshift_metrics_cassandra_limits_cpu: null
+openshift_metrics_cassandra_requests_memory: 1G
+openshift_metrics_cassandra_requests_cpu: null
+
+openshift_metrics_heapster_standalone: False
+openshift_metrics_heapster_limits_memory: 3.75G
+openshift_metrics_heapster_limits_cpu: null
+openshift_metrics_heapster_requests_memory: 0.9375G
+openshift_metrics_heapster_requests_cpu: null
+
+openshift_metrics_duration: 7
+openshift_metrics_resolution: 15s
+
+#####
+# Caution should be taken for the following defaults before
+# overriding the values here
+#####
+
+openshift_metrics_certs_dir: "{{ openshift.common.config_base }}/master/metrics"
+openshift_metrics_master_url: https://kubernetes.default.svc.cluster.local
+openshift_metrics_node_id: nodename
+openshift_metrics_project: openshift-infra
+
+openshift_metrics_cassandra_pv_prefix: metrics-cassandra
+
+openshift_metrics_hawkular_user_write_access: False
+
+openshift_metrics_heapster_allowed_users: system:master-proxy
diff --git a/roles/openshift_metrics/files/import_jks_certs.sh b/roles/openshift_metrics/files/import_jks_certs.sh
new file mode 100755
index 000000000..f4315ef34
--- /dev/null
+++ b/roles/openshift_metrics/files/import_jks_certs.sh
@@ -0,0 +1,116 @@
+#!/bin/bash
+#
+# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+set -ex
+
+function import_certs() {
+ dir=$CERT_DIR
+ hawkular_metrics_keystore_password=$(echo $METRICS_KEYSTORE_PASSWD | base64 -d)
+ hawkular_cassandra_keystore_password=$(echo $CASSANDRA_KEYSTORE_PASSWD | base64 -d)
+ hawkular_metrics_truststore_password=$(echo $METRICS_TRUSTSTORE_PASSWD | base64 -d)
+ hawkular_cassandra_truststore_password=$(echo $CASSANDRA_TRUSTSTORE_PASSWD | base64 -d)
+ hawkular_jgroups_password=$(echo $JGROUPS_PASSWD | base64 -d)
+
+ cassandra_alias=`keytool -noprompt -list -keystore $dir/hawkular-cassandra.truststore -storepass ${hawkular_cassandra_truststore_password} | sed -n '7~2s/,.*$//p'`
+ hawkular_alias=`keytool -noprompt -list -keystore $dir/hawkular-metrics.truststore -storepass ${hawkular_metrics_truststore_password} | sed -n '7~2s/,.*$//p'`
+
+ if [ ! -f $dir/hawkular-metrics.keystore ]; then
+ echo "Creating the Hawkular Metrics keystore from the PEM file"
+ keytool -importkeystore -v \
+ -srckeystore $dir/hawkular-metrics.pkcs12 \
+ -destkeystore $dir/hawkular-metrics.keystore \
+ -srcstoretype PKCS12 \
+ -deststoretype JKS \
+ -srcstorepass $hawkular_metrics_keystore_password \
+ -deststorepass $hawkular_metrics_keystore_password
+ fi
+
+ if [ ! -f $dir/hawkular-cassandra.keystore ]; then
+ echo "Creating the Hawkular Cassandra keystore from the PEM file"
+ keytool -importkeystore -v \
+ -srckeystore $dir/hawkular-cassandra.pkcs12 \
+ -destkeystore $dir/hawkular-cassandra.keystore \
+ -srcstoretype PKCS12 \
+ -deststoretype JKS \
+ -srcstorepass $hawkular_cassandra_keystore_password \
+ -deststorepass $hawkular_cassandra_keystore_password
+ fi
+
+ if [[ ! ${cassandra_alias[*]} =~ hawkular-metrics ]]; then
+ echo "Importing the Hawkular Certificate into the Cassandra Truststore"
+ keytool -noprompt -import -v -trustcacerts -alias hawkular-metrics \
+ -file $dir/hawkular-metrics.crt \
+ -keystore $dir/hawkular-cassandra.truststore \
+ -trustcacerts \
+ -storepass $hawkular_cassandra_truststore_password
+ fi
+
+ if [[ ! ${hawkular_alias[*]} =~ hawkular-cassandra ]]; then
+ echo "Importing the Cassandra Certificate into the Hawkular Truststore"
+ keytool -noprompt -import -v -trustcacerts -alias hawkular-cassandra \
+ -file $dir/hawkular-cassandra.crt \
+ -keystore $dir/hawkular-metrics.truststore \
+ -trustcacerts \
+ -storepass $hawkular_metrics_truststore_password
+ fi
+
+ if [[ ! ${cassandra_alias[*]} =~ hawkular-cassandra ]]; then
+ echo "Importing the Hawkular Cassandra Certificate into the Cassandra Truststore"
+ keytool -noprompt -import -v -trustcacerts -alias hawkular-cassandra \
+ -file $dir/hawkular-cassandra.crt \
+ -keystore $dir/hawkular-cassandra.truststore \
+ -trustcacerts \
+ -storepass $hawkular_cassandra_truststore_password
+ fi
+
+ cert_alias_names=(ca metricca cassandraca)
+
+ for cert_alias in ${cert_alias_names[*]}; do
+ if [[ ! ${cassandra_alias[*]} =~ "$cert_alias" ]]; then
+ echo "Importing the CA Certificate with alias $cert_alias into the Cassandra Truststore"
+ keytool -noprompt -import -v -trustcacerts -alias $cert_alias \
+ -file ${dir}/ca.crt \
+ -keystore $dir/hawkular-cassandra.truststore \
+ -trustcacerts \
+ -storepass $hawkular_cassandra_truststore_password
+ fi
+ done
+
+ for cert_alias in ${cert_alias_names[*]}; do
+ if [[ ! ${hawkular_alias[*]} =~ "$cert_alias" ]]; then
+ echo "Importing the CA Certificate with alias $cert_alias into the Hawkular Metrics Truststore"
+ keytool -noprompt -import -v -trustcacerts -alias $cert_alias \
+ -file ${dir}/ca.crt \
+ -keystore $dir/hawkular-metrics.truststore \
+ -trustcacerts \
+ -storepass $hawkular_metrics_truststore_password
+ fi
+ done
+
+ if [ ! -f $dir/hawkular-jgroups.keystore ]; then
+ echo "Generating the jgroups keystore"
+ keytool -genseckey -alias hawkular -keypass ${hawkular_jgroups_password} \
+ -storepass ${hawkular_jgroups_password} \
+ -keyalg Blowfish \
+ -keysize 56 \
+ -keystore $dir/hawkular-jgroups.keystore \
+ -storetype JCEKS
+ fi
+}
+
+import_certs
diff --git a/roles/openshift_metrics/meta/main.yaml b/roles/openshift_metrics/meta/main.yaml
index debca3ca6..68e94992e 100644
--- a/roles/openshift_metrics/meta/main.yaml
+++ b/roles/openshift_metrics/meta/main.yaml
@@ -1,18 +1,18 @@
---
galaxy_info:
- author: David Martín
- description:
- company:
- license: Apache License, Version 2.0
+ author: OpenShift Development <dev@lists.openshift.redhat.com>
+ description: Deploy OpenShift metrics integration for the cluster
+ company: Red Hat, Inc.
+ license: license (Apache)
min_ansible_version: 2.2
platforms:
- name: EL
versions:
- 7
+ - name: Fedora
+ versions:
+ - all
categories:
- - cloud
- - system
+ - openshift
dependencies:
-- { role: openshift_examples }
- { role: openshift_facts }
-- { role: openshift_master_facts }
diff --git a/roles/openshift_metrics/tasks/generate_certificates.yaml b/roles/openshift_metrics/tasks/generate_certificates.yaml
new file mode 100644
index 000000000..16a967aa7
--- /dev/null
+++ b/roles/openshift_metrics/tasks/generate_certificates.yaml
@@ -0,0 +1,26 @@
+---
+- name: create certificate output directory
+ file:
+ path: "{{ openshift_metrics_certs_dir }}"
+ state: directory
+ mode: 0700
+
+- name: list existing secrets
+ command: >
+ {{ openshift.common.client_binary }} -n {{ openshift_metrics_project }}
+ --config={{ mktemp.stdout }}/admin.kubeconfig
+ get secrets -o name
+ register: metrics_secrets
+ changed_when: false
+
+- name: generate ca certificate chain
+ shell: >
+ {{ openshift.common.admin_binary }} ca create-signer-cert
+ --config={{ mktemp.stdout }}/admin.kubeconfig
+ --key='{{ openshift_metrics_certs_dir }}/ca.key'
+ --cert='{{ openshift_metrics_certs_dir }}/ca.crt'
+ --serial='{{ openshift_metrics_certs_dir }}/ca.serial.txt'
+ --name="metrics-signer@$(date +%s)"
+ when: not '{{ openshift_metrics_certs_dir }}/ca.key' | exists
+- include: generate_heapster_certificates.yaml
+- include: generate_hawkular_certificates.yaml
diff --git a/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
new file mode 100644
index 000000000..9cf4afee0
--- /dev/null
+++ b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
@@ -0,0 +1,166 @@
+---
+- name: generate hawkular-metrics certificates
+ include: setup_certificate.yaml
+ vars:
+ component: hawkular-metrics
+ hostnames: "hawkular-metrics,{{ openshift_metrics_hawkular_hostname }}"
+ changed_when: no
+
+- name: generate hawkular-cassandra certificates
+ include: setup_certificate.yaml
+ vars:
+ component: hawkular-cassandra
+ hostnames: hawkular-cassandra
+ changed_when: no
+
+- slurp: src={{ openshift_metrics_certs_dir }}/hawkular-cassandra-truststore.pwd
+ register: cassandra_truststore_password
+
+- slurp: src={{ openshift_metrics_certs_dir }}/hawkular-metrics-truststore.pwd
+ register: hawkular_truststore_password
+
+- name: generate password for hawkular metrics and jgroups
+ copy:
+ dest: '{{ openshift_metrics_certs_dir }}/{{ item }}.pwd'
+ content: "{{ 15 | oo_random_word }}"
+ with_items:
+ - hawkular-metrics
+ - hawkular-jgroups-keystore
+ when: not '{{ openshift_metrics_certs_dir }}/{{ item }}.pwd'|exists
+
+- name: generate htpasswd file for hawkular metrics
+ shell: >
+ htpasswd -ci
+ '{{ openshift_metrics_certs_dir }}/hawkular-metrics.htpasswd' hawkular
+ < '{{ openshift_metrics_certs_dir }}/hawkular-metrics.pwd'
+ when: >
+ not '{{ openshift_metrics_certs_dir }}/hawkular-metrics.htpasswd'|exists
+
+- include: import_jks_certs.yaml
+
+- name: read files for the hawkular-metrics secret
+ shell: >
+ printf '%s: ' '{{ item }}'
+ && base64 --wrap 0 '{{ openshift_metrics_certs_dir }}/{{ item }}'
+ register: hawkular_secrets
+ with_items:
+ - ca.crt
+ - hawkular-metrics.crt
+ - hawkular-metrics.keystore
+ - hawkular-metrics-keystore.pwd
+ - hawkular-metrics.truststore
+ - hawkular-metrics-truststore.pwd
+ - hawkular-metrics.pwd
+ - hawkular-metrics.htpasswd
+ - hawkular-jgroups.keystore
+ - hawkular-jgroups-keystore.pwd
+ - hawkular-cassandra.crt
+ - hawkular-cassandra.pem
+ - hawkular-cassandra.keystore
+ - hawkular-cassandra-keystore.pwd
+ - hawkular-cassandra.truststore
+ - hawkular-cassandra-truststore.pwd
+ changed_when: false
+
+- set_fact:
+ hawkular_secrets: |
+ {{ hawkular_secrets.results|map(attribute='stdout')|join('
+ ')|from_yaml }}
+
+- name: generate hawkular-metrics-secrets secret template
+ template:
+ src: secret.j2
+ dest: "{{ mktemp.stdout }}/templates/hawkular_metrics_secrets.yaml"
+ vars:
+ name: hawkular-metrics-secrets
+ labels:
+ metrics-infra: hawkular-metrics
+ data:
+ hawkular-metrics.keystore: >
+ {{ hawkular_secrets['hawkular-metrics.keystore'] }}
+ hawkular-metrics.keystore.password: >
+ {{ hawkular_secrets['hawkular-metrics-keystore.pwd'] }}
+ hawkular-metrics.truststore: >
+ {{ hawkular_secrets['hawkular-metrics.truststore'] }}
+ hawkular-metrics.truststore.password: >
+ {{ hawkular_secrets['hawkular-metrics-truststore.pwd'] }}
+ hawkular-metrics.keystore.alias: "{{ 'hawkular-metrics'|b64encode }}"
+ hawkular-metrics.htpasswd.file: >
+ {{ hawkular_secrets['hawkular-metrics.htpasswd'] }}
+ hawkular-metrics.jgroups.keystore: >
+ {{ hawkular_secrets['hawkular-jgroups.keystore'] }}
+ hawkular-metrics.jgroups.keystore.password: >
+ {{ hawkular_secrets['hawkular-jgroups-keystore.pwd'] }}
+ hawkular-metrics.jgroups.alias: "{{ 'hawkular'|b64encode }}"
+ when: name not in metrics_secrets.stdout_lines
+ changed_when: no
+
+- name: generate hawkular-metrics-certificate secret template
+ template:
+ src: secret.j2
+ dest: "{{ mktemp.stdout }}/templates/hawkular_metrics_certificate.yaml"
+ vars:
+ name: hawkular-metrics-certificate
+ labels:
+ metrics-infra: hawkular-metrics
+ data:
+ hawkular-metrics.certificate: >
+ {{ hawkular_secrets['hawkular-metrics.crt'] }}
+ hawkular-metrics-ca.certificate: >
+ {{ hawkular_secrets['ca.crt'] }}
+ when: name not in metrics_secrets.stdout_lines
+ changed_when: no
+
+- name: generate hawkular-metrics-account secret template
+ template:
+ src: secret.j2
+ dest: "{{ mktemp.stdout }}/templates/hawkular_metrics_account.yaml"
+ vars:
+ name: hawkular-metrics-account
+ labels:
+ metrics-infra: hawkular-metrics
+ data:
+ hawkular-metrics.username: "{{ 'hawkular'|b64encode }}"
+ hawkular-metrics.password: >
+ {{ hawkular_secrets['hawkular-metrics.pwd'] }}
+ when: name not in metrics_secrets.stdout_lines
+ changed_when: no
+
+- name: generate cassandra secret template
+ template:
+ src: secret.j2
+ dest: "{{ mktemp.stdout }}/templates/cassandra_secrets.yaml"
+ vars:
+ name: hawkular-cassandra-secrets
+ labels:
+ metrics-infra: hawkular-cassandra
+ data:
+ cassandra.keystore: >
+ {{ hawkular_secrets['hawkular-cassandra.keystore'] }}
+ cassandra.keystore.password: >
+ {{ hawkular_secrets['hawkular-cassandra-keystore.pwd'] }}
+ cassandra.keystore.alias: "{{ 'hawkular-cassandra'|b64encode }}"
+ cassandra.truststore: >
+ {{ hawkular_secrets['hawkular-cassandra.truststore'] }}
+ cassandra.truststore.password: >
+ {{ hawkular_secrets['hawkular-cassandra-truststore.pwd'] }}
+ cassandra.pem: >
+ {{ hawkular_secrets['hawkular-cassandra.pem'] }}
+ when: name not in metrics_secrets.stdout_lines
+ changed_when: no
+
+- name: generate cassandra-certificate secret template
+ template:
+ src: secret.j2
+ dest: "{{ mktemp.stdout }}/templates/cassandra_certificate.yaml"
+ vars:
+ name: hawkular-cassandra-certificate
+ labels:
+ metrics-infra: hawkular-cassandra
+ data:
+ cassandra.certificate: >
+ {{ hawkular_secrets['hawkular-cassandra.crt'] }}
+ cassandra-ca.certificate: >
+ {{ hawkular_secrets['hawkular-cassandra.pem'] }}
+ when: name not in metrics_secrets.stdout_lines
+ changed_when: no
diff --git a/roles/openshift_metrics/tasks/generate_heapster_certificates.yaml b/roles/openshift_metrics/tasks/generate_heapster_certificates.yaml
new file mode 100644
index 000000000..2449b1518
--- /dev/null
+++ b/roles/openshift_metrics/tasks/generate_heapster_certificates.yaml
@@ -0,0 +1,41 @@
+---
+- name: generate heapster key/cert
+ command: >
+ {{ openshift.common.admin_binary }} ca create-server-cert
+ --config={{ mktemp.stdout }}/admin.kubeconfig
+ --key='{{ openshift_metrics_certs_dir }}/heapster.key'
+ --cert='{{ openshift_metrics_certs_dir }}/heapster.cert'
+ --hostnames=heapster
+ --signer-cert='{{ openshift_metrics_certs_dir }}/ca.crt'
+ --signer-key='{{ openshift_metrics_certs_dir }}/ca.key'
+ --signer-serial='{{ openshift_metrics_certs_dir }}/ca.serial.txt'
+ when: not '{{ openshift_metrics_certs_dir }}/heapster.key' | exists
+
+- when: "'secret/heapster-secrets' not in metrics_secrets.stdout_lines"
+ block:
+ - name: read files for the heapster secret
+ slurp: src={{ item }}
+ register: heapster_secret
+ with_items:
+ - "{{ openshift_metrics_certs_dir }}/heapster.cert"
+ - "{{ openshift_metrics_certs_dir }}/heapster.key"
+ - "{{ client_ca }}"
+ vars:
+ custom_ca: "{{ openshift_metrics_certs_dir }}/heapster_client_ca.crt"
+ default_ca: "{{ openshift.common.config_base }}/master/ca-bundle.crt"
+ client_ca: "{{ custom_ca|exists|ternary(custom_ca, default_ca) }}"
+ - name: generate heapster secret template
+ template:
+ src: secret.j2
+ dest: "{{ mktemp.stdout }}/templates/heapster_secrets.yaml"
+ force: no
+ vars:
+ name: heapster-secrets
+ labels:
+ metrics-infra: heapster
+ data:
+ heapster.cert: "{{ heapster_secret.results[0].content }}"
+ heapster.key: "{{ heapster_secret.results[1].content }}"
+ heapster.client-ca: "{{ heapster_secret.results[2].content }}"
+ heapster.allowed-users: >
+ {{ openshift_metrics_heapster_allowed_users|b64encode }}
diff --git a/roles/openshift_metrics/tasks/generate_rolebindings.yaml b/roles/openshift_metrics/tasks/generate_rolebindings.yaml
new file mode 100644
index 000000000..6524c3f32
--- /dev/null
+++ b/roles/openshift_metrics/tasks/generate_rolebindings.yaml
@@ -0,0 +1,33 @@
+---
+- name: generate view role binding for the hawkular service account
+ template:
+ src: rolebinding.j2
+ dest: "{{ mktemp.stdout }}/templates/hawkular-rolebinding.yaml"
+ vars:
+ obj_name: hawkular-view
+ labels:
+ metrics-infra: hawkular
+ roleRef:
+ name: view
+ subjects:
+ - kind: ServiceAccount
+ name: hawkular
+ changed_when: no
+
+- name: generate cluster-reader role binding for the heapster service account
+ template:
+ src: rolebinding.j2
+ dest: "{{ mktemp.stdout }}/templates/heapster-rolebinding.yaml"
+ vars:
+ cluster: True
+ obj_name: heapster-cluster-reader
+ labels:
+ metrics-infra: heapster
+ roleRef:
+ kind: ClusterRole
+ name: cluster-reader
+ subjects:
+ - kind: ServiceAccount
+ name: heapster
+ namespace: "{{ openshift_metrics_project }}"
+ changed_when: no
diff --git a/roles/openshift_metrics/tasks/generate_serviceaccounts.yaml b/roles/openshift_metrics/tasks/generate_serviceaccounts.yaml
new file mode 100644
index 000000000..94f34d860
--- /dev/null
+++ b/roles/openshift_metrics/tasks/generate_serviceaccounts.yaml
@@ -0,0 +1,27 @@
+---
+- name: Generating serviceaccounts for hawkular metrics/cassandra
+ template: src=serviceaccount.j2 dest={{mktemp.stdout}}/templates/metrics-{{obj_name}}-sa.yaml
+ vars:
+ obj_name: "{{item.name}}"
+ labels:
+ metrics-infra: support
+ secrets:
+ - hawkular-{{item.secret}}-secrets
+ with_items:
+ - name: hawkular
+ secret: hawkular-metrics-secrets
+ - name: cassandra
+ secret: hawkular-cassandra-secrets
+ changed_when: no
+
+- name: Generating serviceaccount for heapster
+ template: src=serviceaccount.j2 dest={{mktemp.stdout}}/templates/metrics-{{obj_name}}-sa.yaml
+ vars:
+ obj_name: heapster
+ labels:
+ metrics-infra: support
+ secrets:
+ - heapster-secrets
+ - hawkular-metrics-certificate
+ - hawkular-metrics-account
+ changed_when: no
diff --git a/roles/openshift_metrics/tasks/generate_services.yaml b/roles/openshift_metrics/tasks/generate_services.yaml
new file mode 100644
index 000000000..903d52bff
--- /dev/null
+++ b/roles/openshift_metrics/tasks/generate_services.yaml
@@ -0,0 +1,46 @@
+---
+- name: Generate service for heapster
+ template: src=service.j2 dest={{mktemp.stdout}}/templates/metrics-{{obj_name}}-svc.yaml
+ vars:
+ obj_name: heapster
+ ports:
+ - {port: 80, targetPort: http-endpoint}
+ selector:
+ name: "{{obj_name}}"
+ labels:
+ metrics-infra: "{{obj_name}}"
+ name: "{{obj_name}}"
+ changed_when: no
+
+- name: Generate service for hawkular-metrics
+ template: src=service.j2 dest={{mktemp.stdout}}/templates/metrics-{{obj_name}}-svc.yaml
+ vars:
+ obj_name: hawkular-metrics
+ ports:
+ - {port: 443, targetPort: https-endpoint}
+ selector:
+ name: "{{obj_name}}"
+ labels:
+ metrics-infra: "{{obj_name}}"
+ name: "{{obj_name}}"
+ changed_when: no
+
+- name: Generate services for cassandra
+ template: src=service.j2 dest={{mktemp.stdout}}/templates/metrics-{{obj_name}}-svc.yaml
+ vars:
+ obj_name: hawkular-{{item}}
+ ports:
+ - {name: cql-port, port: 9042, targetPort: cql-port}
+ - {name: thrift-port, port: 9160, targetPort: thrift-port}
+ - {name: tcp-port, port: 7000, targetPort: tcp-port}
+ - {name: ssl-port, port: 7001, targetPort: ssl-port}
+ selector:
+ type: hawkular-cassandra
+ labels:
+ metrics-infra: hawkular-cassandra
+ name: hawkular-cassandra
+ headless: "{{ item == 'cassandra-nodes' }}"
+ with_items:
+ - cassandra
+ - cassandra-nodes
+ changed_when: no
diff --git a/roles/openshift_metrics/tasks/import_jks_certs.yaml b/roles/openshift_metrics/tasks/import_jks_certs.yaml
new file mode 100644
index 000000000..f5192b005
--- /dev/null
+++ b/roles/openshift_metrics/tasks/import_jks_certs.yaml
@@ -0,0 +1,72 @@
+---
+- stat: path="{{openshift_metrics_certs_dir}}/hawkular-cassandra.keystore"
+ register: cassandra_keystore
+ check_mode: no
+
+- stat: path="{{openshift_metrics_certs_dir}}/hawkular-cassandra.truststore"
+ register: cassandra_truststore
+ check_mode: no
+
+- stat: path="{{openshift_metrics_certs_dir}}/hawkular-metrics.keystore"
+ register: metrics_keystore
+ check_mode: no
+
+- stat: path="{{openshift_metrics_certs_dir}}/hawkular-metrics.truststore"
+ register: metrics_truststore
+ check_mode: no
+
+- stat: path="{{openshift_metrics_certs_dir}}/hawkular-jgroups.keystore"
+ register: jgroups_keystore
+ check_mode: no
+
+- block:
+ - slurp: src={{ openshift_metrics_certs_dir }}/hawkular-metrics-keystore.pwd
+ register: metrics_keystore_password
+
+ - slurp: src={{ openshift_metrics_certs_dir }}/hawkular-cassandra-keystore.pwd
+ register: cassandra_keystore_password
+
+ - slurp: src={{ openshift_metrics_certs_dir }}/hawkular-jgroups-keystore.pwd
+ register: jgroups_keystore_password
+
+ - local_action: command mktemp -d
+ register: local_tmp
+ changed_when: False
+
+ - fetch:
+ dest: "{{local_tmp.stdout}}/"
+ src: "{{ openshift_metrics_certs_dir }}/{{item}}"
+ flat: yes
+ changed_when: False
+ with_items:
+ - hawkular-metrics.pkcs12
+ - hawkular-cassandra.pkcs12
+ - hawkular-metrics.crt
+ - hawkular-cassandra.crt
+ - ca.crt
+
+ - local_action: command {{role_path}}/files/import_jks_certs.sh
+ environment:
+ CERT_DIR: "{{local_tmp.stdout}}"
+ METRICS_KEYSTORE_PASSWD: "{{metrics_keystore_password.content}}"
+ CASSANDRA_KEYSTORE_PASSWD: "{{cassandra_keystore_password.content}}"
+ METRICS_TRUSTSTORE_PASSWD: "{{hawkular_truststore_password.content}}"
+ CASSANDRA_TRUSTSTORE_PASSWD: "{{cassandra_truststore_password.content}}"
+ JGROUPS_PASSWD: "{{jgroups_keystore_password.content}}"
+ changed_when: False
+
+ - copy:
+ dest: "{{openshift_metrics_certs_dir}}/"
+ src: "{{item}}"
+ with_fileglob: "{{local_tmp.stdout}}/*.*store"
+
+ - file:
+ path: "{{local_tmp.stdout}}"
+ state: absent
+ changed_when: False
+
+ when: not metrics_keystore.stat.exists or
+ not metrics_truststore.stat.exists or
+ not cassandra_keystore.stat.exists or
+ not cassandra_truststore.stat.exists or
+ not jgroups_keystore.stat.exists
diff --git a/roles/openshift_metrics/tasks/install_cassandra.yaml b/roles/openshift_metrics/tasks/install_cassandra.yaml
new file mode 100644
index 000000000..a9340acc3
--- /dev/null
+++ b/roles/openshift_metrics/tasks/install_cassandra.yaml
@@ -0,0 +1,54 @@
+---
+- shell: >
+ {{ openshift.common.client_binary }} -n {{ openshift_metrics_project | quote }}
+ --config={{ mktemp.stdout }}/admin.kubeconfig
+ get rc hawkular-cassandra-{{node}} -o jsonpath='{.spec.replicas}' || echo 0
+ vars:
+ node: "{{ item }}"
+ register: cassandra_replica_count
+ with_sequence: count={{ openshift_metrics_cassandra_replicas }}
+ changed_when: false
+ failed_when: false
+
+- name: generate hawkular-cassandra replication controllers
+ template:
+ src: hawkular_cassandra_rc.j2
+ dest: "{{ mktemp.stdout }}/templates/hawkular-cassandra-rc{{ item }}.yaml"
+ vars:
+ node: "{{ item }}"
+ master: "{{ (item == '1')|string|lower }}"
+ replica_count: "{{cassandra_replica_count.results[item|int - 1].stdout}}"
+ with_sequence: count={{ openshift_metrics_cassandra_replicas }}
+ changed_when: false
+
+- name: generate hawkular-cassandra persistent volume claims
+ template:
+ src: pvc.j2
+ dest: "{{ mktemp.stdout }}/templates/hawkular-cassandra-pvc{{ item }}.yaml"
+ vars:
+ obj_name: "{{ openshift_metrics_cassandra_pv_prefix }}-{{ item }}"
+ labels:
+ metrics-infra: hawkular-cassandra
+ access_modes:
+ - ReadWriteOnce
+ size: "{{ openshift_metrics_cassandra_pv_size }}"
+ with_sequence: count={{ openshift_metrics_cassandra_replicas }}
+ when: openshift_metrics_cassandra_storage_type == 'pv'
+ changed_when: false
+
+- name: generate hawkular-cassandra persistent volume claims (dynamic)
+ template:
+ src: pvc.j2
+ dest: "{{ mktemp.stdout }}/templates/hawkular-cassandra-pvc{{ item }}.yaml"
+ vars:
+ obj_name: "{{ openshift_metrics_cassandra_pv_prefix }}-{{ item }}"
+ labels:
+ metrics-infra: hawkular-cassandra
+ annotations:
+ volume.alpha.kubernetes.io/storage-class: dynamic
+ access_modes:
+ - ReadWriteOnce
+ size: "{{ openshift_metrics_cassandra_pv_size }}"
+ with_sequence: count={{ openshift_metrics_cassandra_replicas }}
+ when: openshift_metrics_cassandra_storage_type == 'dynamic'
+ changed_when: false
diff --git a/roles/openshift_metrics/tasks/install_hawkular.yaml b/roles/openshift_metrics/tasks/install_hawkular.yaml
new file mode 100644
index 000000000..00f7b2554
--- /dev/null
+++ b/roles/openshift_metrics/tasks/install_hawkular.yaml
@@ -0,0 +1,54 @@
+---
+- command: >
+ {{ openshift.common.client_binary }} -n {{ openshift_metrics_project | quote }}
+ --config={{ mktemp.stdout }}/admin.kubeconfig
+ get rc hawkular-metrics -o jsonpath='{.spec.replicas}'
+ register: hawkular_metrics_replica_count
+ failed_when: false
+ changed_when: false
+
+- name: generate hawkular-metrics replication controller
+ template:
+ src: hawkular_metrics_rc.j2
+ dest: "{{ mktemp.stdout }}/templates/hawkular_metrics_rc.yaml"
+ vars:
+ replica_count: "{{hawkular_metrics_replica_count.stdout | default(0)}}"
+ changed_when: false
+
+- name: read hawkular-metrics route destination ca certificate
+ slurp: src={{ openshift_metrics_certs_dir }}/ca.crt
+ register: metrics_route_dest_ca_cert
+ changed_when: false
+
+- block:
+ - set_fact: hawkular_key={{ lookup('file', openshift_metrics_hawkular_key) }}
+ when: openshift_metrics_hawkular_key | exists
+ changed_when: false
+
+ - set_fact: hawkular_cert={{ lookup('file', openshift_metrics_hawkular_cert) }}
+ when: openshift_metrics_hawkular_cert | exists
+ changed_when: false
+
+ - set_fact: hawkular_ca={{ lookup('file', openshift_metrics_hawkular_ca) }}
+ when: openshift_metrics_hawkular_ca | exists
+ changed_when: false
+
+ - name: generate the hawkular-metrics route
+ template:
+ src: route.j2
+ dest: "{{ mktemp.stdout }}/templates/hawkular-metrics-route.yaml"
+ vars:
+ name: hawkular-metrics
+ labels:
+ metrics-infra: hawkular-metrics
+ host: "{{ openshift_metrics_hawkular_hostname }}"
+ to:
+ kind: Service
+ name: hawkular-metrics
+ tls:
+ termination: reencrypt
+ key: "{{ hawkular_key | default('') }}"
+ certificate: "{{ hawkular_cert | default('') }}"
+ ca_certificate: "{{ hawkular_ca | default('') }}"
+ destination_ca_certificate: "{{ metrics_route_dest_ca_cert.content | b64decode }}"
+ changed_when: false
diff --git a/roles/openshift_metrics/tasks/install_heapster.yaml b/roles/openshift_metrics/tasks/install_heapster.yaml
new file mode 100644
index 000000000..39df797ab
--- /dev/null
+++ b/roles/openshift_metrics/tasks/install_heapster.yaml
@@ -0,0 +1,14 @@
+---
+- command: >
+ {{ openshift.common.client_binary }} -n {{ openshift_metrics_project | quote }}
+ --config={{ mktemp.stdout }}/admin.kubeconfig
+ get rc heapster -o jsonpath='{.spec.replicas}'
+ register: heapster_replica_count
+ failed_when: false
+ changed_when: no
+
+- name: Generate heapster replication controller
+ template: src=heapster.j2 dest={{mktemp.stdout}}/templates/metrics-heapster-rc.yaml
+ vars:
+ replica_count: "{{heapster_replica_count.stdout | default(0)}}"
+ changed_when: no
diff --git a/roles/openshift_metrics/tasks/install_metrics.yaml b/roles/openshift_metrics/tasks/install_metrics.yaml
new file mode 100644
index 000000000..ddaa54438
--- /dev/null
+++ b/roles/openshift_metrics/tasks/install_metrics.yaml
@@ -0,0 +1,45 @@
+---
+- name: Check that hawkular_metrics_hostname is set
+ fail: msg='the openshift_metrics_hawkular_hostname variable is required'
+ when: openshift_metrics_hawkular_hostname is not defined
+
+- name: Check the value of openshift_metrics_cassandra_storage_type
+ fail:
+ msg: >
+ openshift_metrics_cassandra_storage_type ({{ openshift_metrics_cassandra_storage_type }})
+ is invalid, must be one of: emptydir, pv, dynamic
+ when: openshift_metrics_cassandra_storage_type not in openshift_metrics_cassandra_storage_types
+
+- name: Install Metrics
+ include: "{{ role_path }}/tasks/install_{{ include_file }}.yaml"
+ with_items:
+ - support
+ - heapster
+ - hawkular
+ - cassandra
+ loop_control:
+ loop_var: include_file
+
+- find: paths={{ mktemp.stdout }}/templates patterns=*.yaml
+ register: object_def_files
+ changed_when: no
+
+- slurp: src={{item.path}}
+ register: object_defs
+ with_items: "{{object_def_files.files}}"
+ changed_when: no
+
+- name: Create objects
+ include: oc_apply.yaml
+ vars:
+ kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
+ namespace: "{{ openshift_metrics_project }}"
+ file_name: "{{ item.source }}"
+ file_content: "{{ item.content | b64decode | from_yaml }}"
+ with_items: "{{ object_defs.results }}"
+
+- name: Scaling up cluster
+ include: start_metrics.yaml
+ tags: openshift_metrics_start_cluster
+ when:
+ - openshift_metrics_start_cluster | default(true) | bool
diff --git a/roles/openshift_metrics/tasks/install_support.yaml b/roles/openshift_metrics/tasks/install_support.yaml
new file mode 100644
index 000000000..cc5acc6e5
--- /dev/null
+++ b/roles/openshift_metrics/tasks/install_support.yaml
@@ -0,0 +1,23 @@
+---
+- name: Check control node to see if htpasswd is installed
+ local_action: command which htpasswd
+ register: htpasswd_check
+ failed_when: no
+ changed_when: no
+
+- fail: msg="'htpasswd' is unavailable. Please install httpd-tools on the control node"
+ when: htpasswd_check.rc == 1
+
+- name: Check control node to see if keytool is installed
+  local_action: command which keytool
+ register: keytool_check
+ failed_when: no
+ changed_when: no
+
+- fail: msg="'keytool' is unavailable. Please install java-1.8.0-openjdk-headless on the control node"
+ when: keytool_check.rc == 1
+
+- include: generate_certificates.yaml
+- include: generate_serviceaccounts.yaml
+- include: generate_services.yaml
+- include: generate_rolebindings.yaml
diff --git a/roles/openshift_metrics/tasks/main.yaml b/roles/openshift_metrics/tasks/main.yaml
index 68e4a48b9..c42440130 100644
--- a/roles/openshift_metrics/tasks/main.yaml
+++ b/roles/openshift_metrics/tasks/main.yaml
@@ -1,75 +1,22 @@
---
-- name: Create temp directory for kubeconfig
- command: mktemp -d /tmp/openshift-ansible-XXXXXX
+- name: Create temp directory for doing work in
+ command: mktemp -td openshift-metrics-ansible-XXXXXX
register: mktemp
changed_when: False
-- name: Record kubeconfig tmp dir
- set_fact:
- openshift_metrics_kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
+- name: Create temp directory for all our templates
+ file: path={{mktemp.stdout}}/templates state=directory mode=0755
+ changed_when: False
- name: Copy the admin client config(s)
command: >
- cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ openshift_metrics_kubeconfig }}
+ cp {{ openshift.common.config_base}}/master/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
changed_when: False
+ check_mode: no
+ tags: metrics_init
-- name: Set hosted metrics facts
- openshift_facts:
- role: hosted
- openshift_env: "{{ hostvars
- | oo_merge_hostvars(vars, inventory_hostname)
- | oo_openshift_env }}"
- openshift_env_structures:
- - 'openshift.hosted.metrics.*'
-
-- set_fact:
- metrics_persistence: "{{ openshift.hosted.metrics.storage_kind | default(none) is not none }}"
- metrics_dynamic_vol: "{{ openshift.hosted.metrics.storage_kind | default(none) == 'dynamic' }}"
- metrics_template_dir: "{{ openshift.common.config_base if openshift.common.is_containerized | bool else '/usr/share/openshift' }}/examples/infrastructure-templates/{{ 'origin' if deployment_type == 'origin' else 'enterprise' }}"
- image_prefix: "{{ '-v IMAGE_PREFIX=' ~ openshift.hosted.metrics.deployer.prefix if 'prefix' in openshift.hosted.metrics.deployer else '' }}"
- image_version: "{{ '-v IMAGE_VERSION=' ~ openshift.hosted.metrics.deployer.version if 'version' in openshift.hosted.metrics.deployer else '' }}"
-
-
-- name: Check for existing metrics pods
- shell: >
- {{ openshift.common.client_binary }}
- --config={{ openshift_metrics_kubeconfig }}
- --namespace openshift-infra
- get pods -l {{ item }} | grep -q Running
- register: metrics_pods_status
- with_items:
- - metrics-infra=hawkular-metrics
- - metrics-infra=heapster
- - metrics-infra=hawkular-cassandra
- failed_when: false
- changed_when: false
-
-- name: Check for previous deployer
- shell: >
- {{ openshift.common.client_binary }}
- --config={{ openshift_metrics_kubeconfig }}
- --namespace openshift-infra
- get pods -l metrics-infra=deployer --sort-by='{.metadata.creationTimestamp}' | tail -1 | grep metrics-deployer-
- register: metrics_deployer_status
- failed_when: false
- changed_when: false
+- include: install_metrics.yaml
+ when: openshift_metrics_install_metrics | default(false) | bool
-- name: Record current deployment status
- set_fact:
- greenfield: "{{ not metrics_deployer_status.rc == 0 }}"
- failed_error: "{{ True if 'Error' in metrics_deployer_status.stdout else False }}"
- metrics_running: "{{ metrics_pods_status.results | oo_collect(attribute='rc') == [0,0,0] }}"
-
-- name: Set deployment mode
- set_fact:
- deployment_mode: "{{ 'refresh' if (failed_error | bool or metrics_upgrade | bool) else 'deploy' }}"
-
-# TODO: handle non greenfield deployments in the future
-- include: install.yml
- when: greenfield
-
-- name: Delete temp directory
- file:
- name: "{{ mktemp.stdout }}"
- state: absent
- changed_when: False
+- include: uninstall_metrics.yaml
+ when: not openshift_metrics_install_metrics | default(false) | bool
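The reworked main.yaml above keys the whole role off a single variable: `openshift_metrics_install_metrics` selects between install_metrics.yaml and uninstall_metrics.yaml. As a minimal sketch (the variable names come from the tasks above; the hostname value is only a placeholder, not a default shipped with the role), an inventory entry driving an install might look like:

```yaml
# hypothetical group_vars entry for the cluster group
openshift_metrics_install_metrics: true            # false would run uninstall_metrics.yaml instead
openshift_metrics_hawkular_hostname: hawkular-metrics.example.com   # required by install_metrics.yaml
```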
diff --git a/roles/openshift_metrics/tasks/oc_apply.yaml b/roles/openshift_metrics/tasks/oc_apply.yaml
new file mode 100644
index 000000000..dd67703b4
--- /dev/null
+++ b/roles/openshift_metrics/tasks/oc_apply.yaml
@@ -0,0 +1,32 @@
+---
+- name: Checking generation of {{file_content.kind}} {{file_content.metadata.name}}
+ command: >
+ {{ openshift.common.client_binary }}
+ --config={{ kubeconfig }}
+ get {{file_content.kind}} {{file_content.metadata.name}}
+ -o jsonpath='{.metadata.resourceVersion}'
+ -n {{namespace}}
+ register: generation_init
+ failed_when: false
+ changed_when: no
+
+- name: Applying {{file_name}}
+ command: >
+ {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+ apply -f {{ file_name }}
+ -n {{ openshift_metrics_project }}
+ register: generation_apply
+ failed_when: "'error' in generation_apply.stderr"
+ changed_when: no
+
+- name: Determine change status of {{file_content.kind}} {{file_content.metadata.name}}
+ command: >
+ {{ openshift.common.client_binary }} --config={{ kubeconfig }}
+ get {{file_content.kind}} {{file_content.metadata.name}}
+ -o jsonpath='{.metadata.resourceVersion}'
+ -n {{namespace}}
+ register: version_changed
+ vars:
+ init_version: "{{ (generation_init is defined) | ternary(generation_init.stdout, '0') }}"
+ failed_when: "'error' in version_changed.stderr"
+ changed_when: version_changed.stdout | int > init_version | int
diff --git a/roles/openshift_metrics/tasks/scale.yaml b/roles/openshift_metrics/tasks/scale.yaml
new file mode 100644
index 000000000..bb4fa621b
--- /dev/null
+++ b/roles/openshift_metrics/tasks/scale.yaml
@@ -0,0 +1,30 @@
+---
+- command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get {{object}}
+ -o jsonpath='{.spec.replicas}' -n {{openshift_metrics_project}}
+ register: replica_count
+ failed_when: "replica_count.rc == 1 and 'exists' not in replica_count.stderr"
+ when: not ansible_check_mode
+ changed_when: no
+
+- command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig scale {{object}}
+ --replicas={{desired}} -n {{openshift_metrics_project}}
+ register: scale_result
+ failed_when: scale_result.rc == 1 and 'exists' not in scale_result.stderr
+ when:
+ - replica_count.stdout != (desired | string)
+ - not ansible_check_mode
+ changed_when: no
+
+- name: Waiting for {{object}} to scale to {{desired}}
+ command: >
+ {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ get {{object}} -n {{openshift_metrics_project|quote}} -o jsonpath='{.status.replicas}'
+ register: replica_counts
+ until: replica_counts.stdout.find("{{desired}}") != -1
+ retries: 30
+ delay: 10
+ when:
+ - replica_count.stdout != (desired | string)
+ - not ansible_check_mode
diff --git a/roles/openshift_metrics/tasks/setup_certificate.yaml b/roles/openshift_metrics/tasks/setup_certificate.yaml
new file mode 100644
index 000000000..5ca8f4462
--- /dev/null
+++ b/roles/openshift_metrics/tasks/setup_certificate.yaml
@@ -0,0 +1,52 @@
+---
+- name: generate {{ component }} keys
+ command: >
+ {{ openshift.common.admin_binary }} ca create-server-cert
+ --config={{ mktemp.stdout }}/admin.kubeconfig
+ --key='{{ openshift_metrics_certs_dir }}/{{ component }}.key'
+ --cert='{{ openshift_metrics_certs_dir }}/{{ component }}.crt'
+ --hostnames='{{ hostnames }}'
+ --signer-cert='{{ openshift_metrics_certs_dir }}/ca.crt'
+ --signer-key='{{ openshift_metrics_certs_dir }}/ca.key'
+ --signer-serial='{{ openshift_metrics_certs_dir }}/ca.serial.txt'
+ when: not '{{ openshift_metrics_certs_dir }}/{{ component }}.key'|exists
+
+- slurp: src={{item}}
+ register: component_certs
+ with_items:
+ - '{{ openshift_metrics_certs_dir | quote }}/{{ component|quote }}.key'
+ - '{{ openshift_metrics_certs_dir | quote }}/{{ component|quote }}.crt'
+ when: not '{{ openshift_metrics_certs_dir }}/{{ component }}.pem'|exists
+
+- name: generate {{ component }} certificate
+ copy:
+ dest: '{{ openshift_metrics_certs_dir }}/{{ component }}.pem'
+ content: "{{ component_certs.results | map(attribute='content') | map('b64decode') | join('') }}"
+ when: not '{{ openshift_metrics_certs_dir }}/{{ component }}.pem'|exists
+
+- name: generate random password for the {{ component }} keystore
+ copy:
+ content: "{{ 15 | oo_random_word }}"
+ dest: '{{ openshift_metrics_certs_dir }}/{{ component }}-keystore.pwd'
+ when: >
+ not '{{ openshift_metrics_certs_dir }}/{{ component }}-keystore.pwd'|exists
+
+- slurp: src={{ openshift_metrics_certs_dir | quote }}/{{ component|quote }}-keystore.pwd
+ register: keystore_password
+
+- name: create the {{ component }} pkcs12 from the pem file
+ command: >
+ openssl pkcs12 -export
+ -in '{{ openshift_metrics_certs_dir }}/{{ component }}.pem'
+ -out '{{ openshift_metrics_certs_dir }}/{{ component }}.pkcs12'
+ -name '{{ component }}' -noiter -nomaciter
+ -password 'pass:{{keystore_password.content | b64decode }}'
+ when: not '{{ openshift_metrics_certs_dir }}/{{ component }}.pkcs12'|exists
+
+- name: generate random password for the {{ component }} truststore
+ copy:
+ content: "{{ 15 | oo_random_word }}"
+ dest: '{{ openshift_metrics_certs_dir | quote }}/{{ component|quote }}-truststore.pwd'
+ when: >
+ not
+ '{{ openshift_metrics_certs_dir | quote }}/{{ component| quote }}-truststore.pwd'|exists
diff --git a/roles/openshift_metrics/tasks/start_metrics.yaml b/roles/openshift_metrics/tasks/start_metrics.yaml
new file mode 100644
index 000000000..c4cae4aff
--- /dev/null
+++ b/roles/openshift_metrics/tasks/start_metrics.yaml
@@ -0,0 +1,54 @@
+---
+- command: >
+ {{openshift.common.client_binary}}
+ --config={{mktemp.stdout}}/admin.kubeconfig
+ get rc
+ -l metrics-infra=hawkular-cassandra
+ -o name
+ -n {{openshift_metrics_project}}
+ register: metrics_cassandra_rc
+ changed_when: no
+
+- name: Start Hawkular Cassandra
+ include: scale.yaml
+ vars:
+ desired: 1
+ with_items: "{{metrics_cassandra_rc.stdout_lines}}"
+ loop_control:
+ loop_var: object
+
+- command: >
+ {{openshift.common.client_binary}}
+ --config={{mktemp.stdout}}/admin.kubeconfig
+ get rc
+ -l metrics-infra=hawkular-metrics
+ -o name
+ -n {{openshift_metrics_project}}
+ register: metrics_metrics_rc
+ changed_when: no
+
+- name: Start Hawkular Metrics
+ include: scale.yaml
+ vars:
+ desired: "{{openshift_metrics_hawkular_replicas}}"
+ with_items: "{{metrics_metrics_rc.stdout_lines}}"
+ loop_control:
+ loop_var: object
+
+- command: >
+ {{openshift.common.client_binary}}
+ --config={{mktemp.stdout}}/admin.kubeconfig
+ get rc
+ -l metrics-infra=heapster
+ -o name
+ -n {{openshift_metrics_project}}
+ register: metrics_heapster_rc
+ changed_when: no
+
+- name: Start Heapster
+ include: scale.yaml
+ vars:
+ desired: 1
+ with_items: "{{metrics_heapster_rc.stdout_lines}}"
+ loop_control:
+ loop_var: object
diff --git a/roles/openshift_metrics/tasks/stop_metrics.yaml b/roles/openshift_metrics/tasks/stop_metrics.yaml
new file mode 100644
index 000000000..bae181e3e
--- /dev/null
+++ b/roles/openshift_metrics/tasks/stop_metrics.yaml
@@ -0,0 +1,55 @@
+---
+- command: >
+ {{openshift.common.client_binary}}
+ --config={{mktemp.stdout}}/admin.kubeconfig
+ get rc
+ -l metrics-infra=heapster
+ -o name
+ -n {{openshift_metrics_project}}
+ register: metrics_heapster_rc
+ changed_when: "'No resources found' not in metrics_heapster_rc.stderr"
+ check_mode: no
+
+- name: Stop Heapster
+ include: scale.yaml
+ vars:
+ desired: 0
+ with_items: "{{metrics_heapster_rc.stdout_lines}}"
+ loop_control:
+ loop_var: object
+
+- command: >
+ {{openshift.common.client_binary}}
+ --config={{mktemp.stdout}}/admin.kubeconfig
+ get rc
+ -l metrics-infra=hawkular-metrics
+ -o name
+ -n {{openshift_metrics_project}}
+ register: metrics_hawkular_rc
+ changed_when: "'No resources found' not in metrics_hawkular_rc.stderr"
+
+- name: Stop Hawkular Metrics
+ include: scale.yaml
+ vars:
+ desired: 0
+ with_items: "{{metrics_hawkular_rc.stdout_lines}}"
+ loop_control:
+ loop_var: object
+
+- command: >
+ {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig
+ get rc
+ -o name
+ -l metrics-infra=hawkular-cassandra
+ -n {{openshift_metrics_project}}
+ register: metrics_cassandra_rc
+ changed_when: "'No resources found' not in metrics_cassandra_rc.stderr"
+
+- name: Stop Hawkular Cassandra
+ include: scale.yaml
+ vars:
+ desired: 0
+ with_items: "{{metrics_cassandra_rc.stdout_lines}}"
+ loop_control:
+ loop_var: object
+ when: metrics_cassandra_rc is defined
diff --git a/roles/openshift_metrics/tasks/uninstall_metrics.yaml b/roles/openshift_metrics/tasks/uninstall_metrics.yaml
new file mode 100644
index 000000000..8a6be6237
--- /dev/null
+++ b/roles/openshift_metrics/tasks/uninstall_metrics.yaml
@@ -0,0 +1,19 @@
+---
+- name: stop metrics
+ include: stop_metrics.yaml
+
+- name: remove metrics components
+ command: >
+ {{ openshift.common.client_binary }} -n {{ openshift_metrics_project }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ delete --ignore-not-found --selector=metrics-infra
+ all,sa,secrets,templates,routes,pvc,rolebindings,clusterrolebindings
+ register: delete_metrics
+ changed_when: "delete_metrics.stdout != 'No resources found'"
+
+- name: remove rolebindings
+ command: >
+ {{ openshift.common.client_binary }} -n {{ openshift_metrics_project }} --config={{ mktemp.stdout }}/admin.kubeconfig
+ delete --ignore-not-found
+ rolebinding/hawkular-view
+ clusterrolebinding/heapster-cluster-reader
+  register: delete_rolebindings
+  changed_when: "delete_rolebindings.stdout != 'No resources found'"
diff --git a/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2 b/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2
new file mode 100644
index 000000000..abd4ff939
--- /dev/null
+++ b/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2
@@ -0,0 +1,125 @@
+apiVersion: v1
+kind: ReplicationController
+metadata:
+ name: hawkular-cassandra-{{ node }}
+ labels:
+ metrics-infra: hawkular-cassandra
+ name: hawkular-cassandra
+ type: hawkular-cassandra
+spec:
+ selector:
+ name: hawkular-cassandra-{{ node }}
+ replicas: {{replica_count}}
+ template:
+ version: v1
+ metadata:
+ labels:
+ metrics-infra: hawkular-cassandra
+ name: hawkular-cassandra-{{ node }}
+ type: hawkular-cassandra
+ spec:
+ serviceAccount: cassandra
+ containers:
+ - image: "{{ openshift_metrics_image_prefix }}metrics-cassandra:{{ openshift_metrics_image_version }}"
+ name: hawkular-cassandra-{{ node }}
+ ports:
+ - name: cql-port
+ containerPort: 9042
+ - name: thift-port
+ containerPort: 9160
+ - name: tcp-port
+ containerPort: 7000
+ - name: ssl-port
+ containerPort: 7001
+ command:
+ - "/opt/apache-cassandra/bin/cassandra-docker.sh"
+ - "--cluster_name=hawkular-metrics"
+ - "--data_volume=/cassandra_data"
+ - "--internode_encryption=all"
+ - "--require_node_auth=true"
+ - "--enable_client_encryption=true"
+ - "--require_client_auth=true"
+ - "--keystore_file=/secret/cassandra.keystore"
+ - "--keystore_password_file=/secret/cassandra.keystore.password"
+ - "--truststore_file=/secret/cassandra.truststore"
+ - "--truststore_password_file=/secret/cassandra.truststore.password"
+ - "--cassandra_pem_file=/secret/cassandra.pem"
+ env:
+ - name: CASSANDRA_MASTER
+ value: "{{ master }}"
+ - name: CASSANDRA_DATA_VOLUME
+ value: "/cassandra_data"
+ - name: JVM_OPTS
+ value: "-Dcassandra.commitlog.ignorereplayerrors=true"
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: MEMORY_LIMIT
+ valueFrom:
+ resourceFieldRef:
+ resource: limits.memory
+ - name: CPU_LIMIT
+ valueFrom:
+ resourceFieldRef:
+ resource: limits.cpu
+ divisor: 1m
+ volumeMounts:
+ - name: cassandra-data
+ mountPath: "/cassandra_data"
+ - name: hawkular-cassandra-secrets
+ mountPath: "/secret"
+{% if ((openshift_metrics_cassandra_limits_cpu is defined and openshift_metrics_cassandra_limits_cpu is not none)
+ or (openshift_metrics_cassandra_limits_memory is defined and openshift_metrics_cassandra_limits_memory is not none)
+ or (openshift_metrics_cassandra_requests_cpu is defined and openshift_metrics_cassandra_requests_cpu is not none)
+ or (openshift_metrics_cassandra_requests_memory is defined and openshift_metrics_cassandra_requests_memory is not none))
+%}
+ resources:
+{% if (openshift_metrics_cassandra_limits_cpu is not none
+ or openshift_metrics_cassandra_limits_memory is not none)
+%}
+ limits:
+{% if openshift_metrics_cassandra_limits_cpu is not none %}
+ cpu: "{{openshift_metrics_cassandra_limits_cpu}}"
+{% endif %}
+{% if openshift_metrics_cassandra_limits_memory is not none %}
+ memory: "{{openshift_metrics_cassandra_limits_memory}}"
+{% endif %}
+{% endif %}
+{% if (openshift_metrics_cassandra_requests_cpu is not none
+ or openshift_metrics_cassandra_requests_memory is not none)
+%}
+ requests:
+{% if openshift_metrics_cassandra_requests_cpu is not none %}
+ cpu: "{{openshift_metrics_cassandra_requests_cpu}}"
+{% endif %}
+{% if openshift_metrics_cassandra_requests_memory is not none %}
+ memory: "{{openshift_metrics_cassandra_requests_memory}}"
+{% endif %}
+{% endif %}
+{% endif %}
+ readinessProbe:
+ exec:
+ command:
+ - "/opt/apache-cassandra/bin/cassandra-docker-ready.sh"
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - "/opt/apache-cassandra/bin/cassandra-prestop.sh"
+ postStart:
+ exec:
+ command:
+ - "/opt/apache-cassandra/bin/cassandra-poststart.sh"
+ terminationGracePeriodSeconds: 1800
+ volumes:
+ - name: cassandra-data
+{% if openshift_metrics_cassandra_storage_type == 'emptydir' %}
+ emptyDir: {}
+{% else %}
+ persistentVolumeClaim:
+ claimName: "{{ openshift_metrics_cassandra_pv_prefix }}-{{ node }}"
+{% endif %}
+ - name: hawkular-cassandra-secrets
+ secret:
+ secretName: hawkular-cassandra-secrets
diff --git a/roles/openshift_metrics/templates/hawkular_metrics_rc.j2 b/roles/openshift_metrics/templates/hawkular_metrics_rc.j2
new file mode 100644
index 000000000..e6954ea44
--- /dev/null
+++ b/roles/openshift_metrics/templates/hawkular_metrics_rc.j2
@@ -0,0 +1,119 @@
+apiVersion: v1
+kind: ReplicationController
+metadata:
+ name: hawkular-metrics
+ labels:
+ metrics-infra: hawkular-metrics
+ name: hawkular-metrics
+spec:
+ selector:
+ name: hawkular-metrics
+ replicas: {{replica_count}}
+ template:
+ version: v1
+ metadata:
+ labels:
+ metrics-infra: hawkular-metrics
+ name: hawkular-metrics
+ spec:
+ serviceAccount: hawkular
+ containers:
+ - image: {{openshift_metrics_image_prefix}}metrics-hawkular-metrics:{{openshift_metrics_image_version}}
+ name: hawkular-metrics
+ ports:
+ - name: http-endpoint
+ containerPort: 8080
+ - name: https-endpoint
+ containerPort: 8443
+ - name: ping
+ containerPort: 8888
+ command:
+ - "/opt/hawkular/scripts/hawkular-metrics-wrapper.sh"
+ - "-b"
+ - 0.0.0.0
+ - "-Dhawkular.metrics.cassandra.nodes=hawkular-cassandra"
+ - "-Dhawkular.metrics.cassandra.use-ssl"
+ - "-Dhawkular.metrics.openshift.auth-methods=openshift-oauth,htpasswd"
+ - "-Dhawkular.metrics.openshift.htpasswd-file=/secrets/hawkular-metrics.htpasswd.file"
+ - "-Dhawkular.metrics.allowed-cors-access-control-allow-headers=authorization"
+ - "-Dhawkular.metrics.default-ttl={{openshift_metrics_duration}}"
+ - "-Dhawkular-alerts.cassandra-nodes=hawkular-cassandra"
+ - "-Dhawkular-alerts.cassandra-use-ssl"
+ - "-Dhawkular.alerts.openshift.auth-methods=openshift-oauth,htpasswd"
+ - "-Dhawkular.alerts.openshift.htpasswd-file=/secrets/hawkular-metrics.htpasswd.file"
+ - "-Dhawkular.alerts.allowed-cors-access-control-allow-headers=authorization"
+ - "-Dorg.apache.tomcat.util.buf.UDecoder.ALLOW_ENCODED_SLASH=true"
+ - "-Dorg.apache.catalina.connector.CoyoteAdapter.ALLOW_BACKSLASH=true"
+ - "-DKUBERNETES_MASTER_URL={{openshift_metrics_master_url}}"
+ - "-DUSER_WRITE_ACCESS={{openshift_metrics_hawkular_user_write_access}}"
+ - "--hmw.keystore=/secrets/hawkular-metrics.keystore"
+ - "--hmw.truststore=/secrets/hawkular-metrics.truststore"
+ - "--hmw.keystore_password_file=/secrets/hawkular-metrics.keystore.password"
+ - "--hmw.truststore_password_file=/secrets/hawkular-metrics.truststore.password"
+ - "--hmw.jgroups_keystore=/secrets/hawkular-metrics.jgroups.keystore"
+ - "--hmw.jgroups_keystore_password_file=/secrets/hawkular-metrics.jgroups.keystore.password"
+ - "--hmw.jgroups_alias_file=/secrets/hawkular-metrics.jgroups.alias"
+ env:
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: MASTER_URL
+ value: "{{ openshift_metrics_master_url }}"
+ - name: OPENSHIFT_KUBE_PING_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: OPENSHIFT_KUBE_PING_LABELS
+ value: "metrics-infra=hawkular-metrics,name=hawkular-metrics"
+ - name: STARTUP_TIMEOUT
+ value: "{{ openshift_metrics_startup_timeout }}"
+ volumeMounts:
+ - name: hawkular-metrics-secrets
+ mountPath: "/secrets"
+ - name: hawkular-metrics-client-secrets
+ mountPath: "/client-secrets"
+{% if ((openshift_metrics_hawkular_limits_cpu is defined and openshift_metrics_hawkular_limits_cpu is not none)
+ or (openshift_metrics_hawkular_limits_memory is defined and openshift_metrics_hawkular_limits_memory is not none)
+ or (openshift_metrics_hawkular_requests_cpu is defined and openshift_metrics_hawkular_requests_cpu is not none)
+ or (openshift_metrics_hawkular_requests_memory is defined and openshift_metrics_hawkular_requests_memory is not none))
+%}
+ resources:
+{% if (openshift_metrics_hawkular_limits_cpu is not none
+ or openshift_metrics_hawkular_limits_memory is not none)
+%}
+ limits:
+{% if openshift_metrics_hawkular_limits_cpu is not none %}
+ cpu: "{{openshift_metrics_hawkular_limits_cpu}}"
+{% endif %}
+{% if openshift_metrics_hawkular_limits_memory is not none %}
+ memory: "{{openshift_metrics_hawkular_limits_memory}}"
+{% endif %}
+{% endif %}
+{% if (openshift_metrics_hawkular_requests_cpu is not none
+ or openshift_metrics_hawkular_requests_memory is not none)
+%}
+ requests:
+{% if openshift_metrics_hawkular_requests_cpu is not none %}
+ cpu: "{{openshift_metrics_hawkular_requests_cpu}}"
+{% endif %}
+{% if openshift_metrics_hawkular_requests_memory is not none %}
+ memory: "{{openshift_metrics_hawkular_requests_memory}}"
+{% endif %}
+{% endif %}
+{% endif %}
+ readinessProbe:
+ exec:
+ command:
+ - "/opt/hawkular/scripts/hawkular-metrics-readiness.py"
+ livenessProbe:
+ exec:
+ command:
+ - "/opt/hawkular/scripts/hawkular-metrics-liveness.py"
+ volumes:
+ - name: hawkular-metrics-secrets
+ secret:
+ secretName: hawkular-metrics-secrets
+ - name: hawkular-metrics-client-secrets
+ secret:
+ secretName: hawkular-metrics-account
diff --git a/roles/openshift_metrics/templates/heapster.j2 b/roles/openshift_metrics/templates/heapster.j2
new file mode 100644
index 000000000..eeca03be0
--- /dev/null
+++ b/roles/openshift_metrics/templates/heapster.j2
@@ -0,0 +1,98 @@
+apiVersion: "v1"
+kind: "ReplicationController"
+metadata:
+ name: heapster
+ labels:
+ metrics-infra: heapster
+ name: heapster
+spec:
+ selector:
+ name: heapster
+ replicas: {{replica_count}}
+ template:
+ version: v1
+ metadata:
+ name: heapster
+ labels:
+ metrics-infra: heapster
+ name: heapster
+ spec:
+ serviceAccountName: heapster
+ containers:
+ - name: heapster
+ image: {{openshift_metrics_image_prefix}}metrics-heapster:{{openshift_metrics_image_version}}
+ ports:
+ - containerPort: 8082
+ name: "http-endpoint"
+ command:
+ - "heapster-wrapper.sh"
+ - "--wrapper.allowed_users_file=/secrets/heapster.allowed-users"
+ - "--source=kubernetes.summary_api:${MASTER_URL}?useServiceAccount=true&kubeletHttps=true&kubeletPort=10250"
+ - "--tls_cert=/secrets/heapster.cert"
+ - "--tls_key=/secrets/heapster.key"
+ - "--tls_client_ca=/secrets/heapster.client-ca"
+ - "--allowed_users=%allowed_users%"
+ - "--metric_resolution={{openshift_metrics_resolution}}"
+{% if not openshift_metrics_heapster_standalone %}
+ - "--wrapper.username_file=/hawkular-account/hawkular-metrics.username"
+ - "--wrapper.password_file=/hawkular-account/hawkular-metrics.password"
+ - "--wrapper.endpoint_check=https://hawkular-metrics:443/hawkular/metrics/status"
+ - "--sink=hawkular:https://hawkular-metrics:443?tenant=_system&labelToTenant=pod_namespace&labelNodeId={{openshift_metrics_node_id}}&caCert=/hawkular-cert/hawkular-metrics-ca.certificate&user=%username%&pass=%password%&filter=label(container_name:^system.slice.*|^user.slice)"
+{% endif %}
+ env:
+ - name: STARTUP_TIMEOUT
+ value: "{{ openshift_metrics_startup_timeout }}"
+{% if ((openshift_metrics_heapster_limits_cpu is defined and openshift_metrics_heapster_limits_cpu is not none)
+ or (openshift_metrics_heapster_limits_memory is defined and openshift_metrics_heapster_limits_memory is not none)
+ or (openshift_metrics_heapster_requests_cpu is defined and openshift_metrics_heapster_requests_cpu is not none)
+ or (openshift_metrics_heapster_requests_memory is defined and openshift_metrics_heapster_requests_memory is not none))
+%}
+ resources:
+{% if (openshift_metrics_heapster_limits_cpu is not none
+ or openshift_metrics_heapster_limits_memory is not none)
+%}
+ limits:
+{% if openshift_metrics_heapster_limits_cpu is not none %}
+ cpu: "{{openshift_metrics_heapster_limits_cpu}}"
+{% endif %}
+{% if openshift_metrics_heapster_limits_memory is not none %}
+ memory: "{{openshift_metrics_heapster_limits_memory}}"
+{% endif %}
+{% endif %}
+{% if (openshift_metrics_heapster_requests_cpu is not none
+ or openshift_metrics_heapster_requests_memory is not none)
+%}
+ requests:
+{% if openshift_metrics_heapster_requests_cpu is not none %}
+ cpu: "{{openshift_metrics_heapster_requests_cpu}}"
+{% endif %}
+{% if openshift_metrics_heapster_requests_memory is not none %}
+ memory: "{{openshift_metrics_heapster_requests_memory}}"
+{% endif %}
+{% endif %}
+{% endif %}
+ volumeMounts:
+ - name: heapster-secrets
+ mountPath: "/secrets"
+{% if not openshift_metrics_heapster_standalone %}
+ - name: hawkular-metrics-certificate
+ mountPath: "/hawkular-cert"
+ - name: hawkular-metrics-account
+ mountPath: "/hawkular-account"
+ readinessProbe:
+ exec:
+ command:
+ - "/opt/heapster-readiness.sh"
+{% endif %}
+ volumes:
+ - name: heapster-secrets
+ secret:
+ secretName: heapster-secrets
+{% if not openshift_metrics_heapster_standalone %}
+ - name: hawkular-metrics-certificate
+ secret:
+ secretName: hawkular-metrics-certificate
+ - name: hawkular-metrics-account
+ secret:
+ secretName: hawkular-metrics-account
+{% endif %}
diff --git a/roles/openshift_metrics/templates/pvc.j2 b/roles/openshift_metrics/templates/pvc.j2
new file mode 100644
index 000000000..8fbfa8b5d
--- /dev/null
+++ b/roles/openshift_metrics/templates/pvc.j2
@@ -0,0 +1,27 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: {{obj_name}}
+{% if labels is not defined %}
+ labels:
+ logging-infra: support
+{% elif labels %}
+ labels:
+{% for key, value in labels.iteritems() %}
+ {{ key }}: {{ value }}
+{% endfor %}
+{% endif %}
+{% if annotations is defined and annotations %}
+ annotations:
+{% for key,value in annotations.iteritems() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% endif %}
+spec:
+ accessModes:
+{% for mode in access_modes %}
+ - {{ mode }}
+{% endfor %}
+ resources:
+ requests:
+ storage: {{size}}
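For illustration, given the vars passed from install_cassandra.yaml earlier in this diff (the concrete values below are placeholders, not defaults taken from the role), the pvc.j2 template above would render roughly as:

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: metrics-cassandra-1                 # {{ openshift_metrics_cassandra_pv_prefix }}-{{ item }}
  labels:
    metrics-infra: hawkular-cassandra
  annotations:                              # only rendered for the 'dynamic' storage type
    volume.alpha.kubernetes.io/storage-class: dynamic
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi                         # {{ openshift_metrics_cassandra_pv_size }} (placeholder)
```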
diff --git a/roles/openshift_metrics/templates/rolebinding.j2 b/roles/openshift_metrics/templates/rolebinding.j2
new file mode 100644
index 000000000..5230f0780
--- /dev/null
+++ b/roles/openshift_metrics/templates/rolebinding.j2
@@ -0,0 +1,23 @@
+apiVersion: v1
+kind: {% if cluster is defined and cluster %}Cluster{% endif %}RoleBinding
+metadata:
+ name: {{obj_name}}
+{% if labels is defined %}
+ labels:
+{% for k, v in labels.iteritems() %}
+ {{ k }}: {{ v }}
+{% endfor %}
+{% endif %}
+roleRef:
+{% if 'kind' in roleRef %}
+ kind: {{ roleRef.kind }}
+{% endif %}
+ name: {{ roleRef.name }}
+subjects:
+{% for sub in subjects %}
+ - kind: {{ sub.kind }}
+ name: {{ sub.name }}
+{% if 'namespace' in sub %}
+ namespace: {{ sub.namespace }}
+{% endif %}
+{% endfor %}
diff --git a/roles/openshift_metrics/templates/route.j2 b/roles/openshift_metrics/templates/route.j2
new file mode 100644
index 000000000..08ca87288
--- /dev/null
+++ b/roles/openshift_metrics/templates/route.j2
@@ -0,0 +1,35 @@
+apiVersion: v1
+kind: Route
+metadata:
+ name: {{ name }}
+{% if labels is defined and labels %}
+ labels:
+{% for k, v in labels.iteritems() %}
+ {{ k }}: {{ v }}
+{% endfor %}
+{% endif %}
+spec:
+ host: {{ host }}
+ to:
+ kind: {{ to.kind }}
+ name: {{ to.name }}
+{% if tls is defined %}
+ tls:
+ termination: {{ tls.termination }}
+{% if tls.ca_certificate is defined and tls.ca_certificate | length > 0 %}
+    caCertificate: |
+{{ tls.ca_certificate|indent(6, true) }}
+{% endif %}
+{% if tls.key is defined and tls.key | length > 0 %}
+ key: |
+{{ tls.key|indent(6, true) }}
+{% endif %}
+{% if tls.certificate is defined and tls.certificate | length > 0 %}
+ certificate: |
+{{ tls.certificate|indent(6, true) }}
+{% endif %}
+{% if tls.termination == 'reencrypt' %}
+ destinationCACertificate: |
+{{ tls.destination_ca_certificate|indent(6, true) }}
+{% endif %}
+{% endif %}
diff --git a/roles/openshift_metrics/templates/secret.j2 b/roles/openshift_metrics/templates/secret.j2
new file mode 100644
index 000000000..370890c7d
--- /dev/null
+++ b/roles/openshift_metrics/templates/secret.j2
@@ -0,0 +1,12 @@
+apiVersion: v1
+kind: Secret
+metadata:
+ name: "{{ name }}"
+ labels:
+{% for k, v in labels.iteritems() %}
+ {{ k }}: {{ v }}
+{% endfor %}
+data:
+{% for k, v in data.iteritems() %}
+ {{ k }}: {{ v }}
+{% endfor %}
diff --git a/roles/openshift_metrics/templates/service.j2 b/roles/openshift_metrics/templates/service.j2
new file mode 100644
index 000000000..8df89127b
--- /dev/null
+++ b/roles/openshift_metrics/templates/service.j2
@@ -0,0 +1,32 @@
+apiVersion: "v1"
+kind: "Service"
+metadata:
+ name: "{{obj_name}}"
+{% if labels is defined%}
+ labels:
+{% for key, value in labels.iteritems() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% endif %}
+spec:
+{% if headless is defined and headless %}
+ portalIP: None
+ clusterIP: None
+{% endif %}
+ ports:
+{% for port in ports %}
+ -
+{% for key, value in port.iteritems() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% if port.targetPort is undefined %}
+ clusterIP: "None"
+{% endif %}
+{% endfor %}
+{% if service_targetPort is defined %}
+ targetPort: {{service_targetPort}}
+{% endif %}
+ selector:
+ {% for key, value in selector.iteritems() %}
+ {{key}}: {{value}}
+ {% endfor %}
diff --git a/roles/openshift_metrics/templates/serviceaccount.j2 b/roles/openshift_metrics/templates/serviceaccount.j2
new file mode 100644
index 000000000..b22acc594
--- /dev/null
+++ b/roles/openshift_metrics/templates/serviceaccount.j2
@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: {{obj_name}}
+{% if labels is defined%}
+ labels:
+{% for key, value in labels.iteritems() %}
+ {{key}}: {{value}}
+{% endfor %}
+{% endif %}
+{% if secrets is defined %}
+secrets:
+{% for name in secrets %}
+- name: {{ name }}
+{% endfor %}
+{% endif %}
diff --git a/roles/openshift_metrics/vars/main.yaml b/roles/openshift_metrics/vars/main.yaml
index 6c207d6ac..4a3724e3f 100644
--- a/roles/openshift_metrics/vars/main.yaml
+++ b/roles/openshift_metrics/vars/main.yaml
@@ -1,21 +1,10 @@
---
-hawkular_permission_oc_commands:
- - policy add-role-to-user edit system:serviceaccount:openshift-infra:metrics-deployer -n openshift-infra
- - policy add-cluster-role-to-user cluster-admin system:serviceaccount:openshift-infra:heapster
-
-metrics_deployer_sa:
- apiVersion: v1
- kind: ServiceAccount
- metadata:
- name: metrics-deployer
- secrets:
- - name: metrics-deployer
-
-
-hawkular_tmp_conf: /tmp/hawkular_admin.kubeconfig
-
-hawkular_persistence: "{% if openshift.hosted.metrics.storage.kind != None %}true{% else %}false{% endif %}"
-
-hawkular_type: "{{ 'origin' if deployment_type == 'origin' else 'enterprise' }}"
-
-metrics_upgrade: openshift.hosted.metrics.upgrade | default(False)
+#
+# These vars are generally considered private and not expected to be altered
+# by end users
+#
+
+openshift_metrics_cassandra_storage_types:
+- emptydir
+- pv
+- dynamic
diff --git a/roles/openshift_node/README.md b/roles/openshift_node/README.md
index d1920c485..b69b60c1d 100644
--- a/roles/openshift_node/README.md
+++ b/roles/openshift_node/README.md
@@ -43,10 +43,12 @@ Currently we support re-labeling nodes but we don't re-schedule running pods nor
```
oadm manage-node --schedulable=false ${NODE}
-oadm manage-node --evacuate ${NODE}
+oadm manage-node --drain ${NODE}
oadm manage-node --schedulable=true ${NODE}
```
+> If you are using a version earlier than 1.5/3.5, you must replace `--drain` with `--evacuate`.
+
TODO
diff --git a/roles/openshift_node/meta/main.yml b/roles/openshift_node/meta/main.yml
index c39269f33..10036abed 100644
--- a/roles/openshift_node/meta/main.yml
+++ b/roles/openshift_node/meta/main.yml
@@ -11,4 +11,35 @@ galaxy_info:
- 7
categories:
- cloud
-dependencies: []
+dependencies:
+- role: openshift_common
+- role: openshift_clock
+- role: openshift_docker
+- role: openshift_node_certificates
+- role: openshift_cloud_provider
+- role: os_firewall
+ os_firewall_allow:
+ - service: Kubernetes kubelet
+ port: 10250/tcp
+ - service: http
+ port: 80/tcp
+ - service: https
+ port: 443/tcp
+ - service: Openshift kubelet ReadOnlyPort
+ port: 10255/tcp
+ - service: Openshift kubelet ReadOnlyPort udp
+ port: 10255/udp
+- role: os_firewall
+ os_firewall_allow:
+ - service: OpenShift OVS sdn
+ port: 4789/udp
+ when: openshift.common.use_openshift_sdn | bool
+- role: os_firewall
+ os_firewall_allow:
+ - service: Kubernetes service NodePort TCP
+ port: "{{ openshift_node_port_range | default('') }}/tcp"
+ - service: Kubernetes service NodePort UDP
+ port: "{{ openshift_node_port_range | default('') }}/udp"
+ when: openshift_node_port_range is defined
+- role: openshift_node_dnsmasq
+ when: openshift.common.use_dnsmasq | bool
diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service
index 3b5865a50..e33d5d497 100644
--- a/roles/openshift_node/templates/openshift.docker.node.service
+++ b/roles/openshift_node/templates/openshift.docker.node.service
@@ -15,7 +15,7 @@ After={{ openshift.common.service_type }}-node-dep.service
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node-dep
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-node
-ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS {{ openshift.node.node_image }}:${IMAGE_VERSION}
+ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }}{{ ':rslave' if openshift.docker.gte_1_10 | default(False) | bool else '' }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:rw -v /sys/fs/cgroup:/sys/fs/cgroup:rw -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS {{ openshift.node.node_image }}:${IMAGE_VERSION}
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node
SyslogIdentifier={{ openshift.common.service_type }}-node
diff --git a/roles/openshift_node_certificates/tasks/main.yml b/roles/openshift_node_certificates/tasks/main.yml
index 717bf3cea..a263f4f3a 100644
--- a/roles/openshift_node_certificates/tasks/main.yml
+++ b/roles/openshift_node_certificates/tasks/main.yml
@@ -49,32 +49,38 @@
--certificate-authority {{ named_ca_certificate }}
{% endfor %}
--certificate-authority={{ openshift_ca_cert }}
- --client-dir={{ openshift_node_generated_config_dir }}
+ --client-dir={{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}
--groups=system:nodes
--master={{ hostvars[openshift_ca_host].openshift.master.api_url }}
--signer-cert={{ openshift_ca_cert }}
--signer-key={{ openshift_ca_key }}
--signer-serial={{ openshift_ca_serial }}
- --user=system:node:{{ openshift.common.hostname }}
+ --user=system:node:{{ hostvars[item].openshift.common.hostname }}
args:
- creates: "{{ openshift_node_generated_config_dir }}"
- when: node_certs_missing | bool
+ creates: "{{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}"
+ with_items: "{{ hostvars
+ | oo_select_keys(groups['oo_nodes_to_config'])
+ | oo_collect(attribute='inventory_hostname', filters={'node_certs_missing':True}) }}"
delegate_to: "{{ openshift_ca_host }}"
+ run_once: true
- name: Generate the node server certificate
command: >
{{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm ca create-server-cert
- --cert={{ openshift_node_generated_config_dir }}/server.crt
- --key={{ openshift_generated_configs_dir }}/node-{{ openshift.common.hostname }}/server.key
+ --cert={{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}/server.crt
+ --key={{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}/server.key
--overwrite=true
- --hostnames={{ openshift.common.hostname }},{{ openshift.common.public_hostname }},{{ openshift.common.ip }},{{ openshift.common.public_ip }}
+ --hostnames={{ hostvars[item].openshift.common.hostname }},{{ hostvars[item].openshift.common.public_hostname }},{{ hostvars[item].openshift.common.ip }},{{ hostvars[item].openshift.common.public_ip }}
--signer-cert={{ openshift_ca_cert }}
--signer-key={{ openshift_ca_key }}
--signer-serial={{ openshift_ca_serial }}
args:
- creates: "{{ openshift_node_generated_config_dir }}/server.crt"
- when: node_certs_missing | bool
- delegate_to: "{{ openshift_ca_host}}"
+ creates: "{{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}/server.crt"
+ with_items: "{{ hostvars
+ | oo_select_keys(groups['oo_nodes_to_config'])
+ | oo_collect(attribute='inventory_hostname', filters={'node_certs_missing':True}) }}"
+ delegate_to: "{{ openshift_ca_host }}"
+ run_once: true
- name: Create local temp directory for syncing certs
local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
diff --git a/roles/openshift_preflight/README.md b/roles/openshift_preflight/README.md
new file mode 100644
index 000000000..b6d3542d3
--- /dev/null
+++ b/roles/openshift_preflight/README.md
@@ -0,0 +1,52 @@
+OpenShift Preflight Checks
+==========================
+
+This role detects common problems prior to installing OpenShift.
+
+Requirements
+------------
+
+* Ansible 2.2+
+
+Role Variables
+--------------
+
+None
+
+Dependencies
+------------
+
+None
+
+Example Playbook
+----------------
+
+```yaml
+---
+- hosts: OSEv3
+ roles:
+ - openshift_preflight/init
+
+- hosts: OSEv3
+ name: checks that apply to all hosts
+ gather_facts: no
+ ignore_errors: yes
+ roles:
+ - openshift_preflight/common
+
+- hosts: OSEv3
+ name: verify check results
+ gather_facts: no
+ roles:
+ - openshift_preflight/verify_status
+```
+
+License
+-------
+
+Apache License Version 2.0
+
+Author Information
+------------------
+
+Customer Success team (dev@lists.openshift.redhat.com)
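Note that in the example playbook above the check roles run with `ignore_errors: yes` and only append their results to the `oo_preflight_check_results` fact (initialised by `openshift_preflight/init`); evaluation is deferred to `openshift_preflight/verify_status`. As a hedged illustration (this play is not part of the role), the accumulated results can be inspected with a plain debug task:

```yaml
- hosts: OSEv3
  gather_facts: no
  tasks:
    - name: show accumulated preflight check results
      debug:
        var: oo_preflight_check_results
```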
diff --git a/roles/openshift_preflight/base/library/aos_version.py b/roles/openshift_preflight/base/library/aos_version.py
new file mode 100755
index 000000000..f7fcb6da5
--- /dev/null
+++ b/roles/openshift_preflight/base/library/aos_version.py
@@ -0,0 +1,100 @@
+#!/usr/bin/python
+# vim: expandtab:tabstop=4:shiftwidth=4
+'''
+An ansible module for determining if more than one minor version
+of any atomic-openshift package is available, which would indicate
+that multiple repos are enabled for different versions of the same
+thing which may cause problems.
+
+Also, determine if the version requested is available down to the
+precision requested.
+'''
+
+# import os
+# import sys
+import yum # pylint: disable=import-error
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main(): # pylint: disable=missing-docstring
+ module = AnsibleModule(
+ argument_spec=dict(
+ version=dict(required=True)
+ ),
+ supports_check_mode=True
+ )
+
+ # NOTE(rhcarvalho): sosiouxme added _unmute, but I couldn't find a case yet
+ # for when it is actually necessary. Leaving it commented out for now,
+ # though this comment and the commented out code related to _unmute should
+ # be deleted later if not proven necessary.
+
+ # sys.stdout = os.devnull # mute yum so it doesn't break our output
+ # sys.stderr = os.devnull # mute yum so it doesn't break our output
+
+ # def _unmute(): # pylint: disable=missing-docstring
+ # sys.stdout = sys.__stdout__
+
+ def bail(error): # pylint: disable=missing-docstring
+ # _unmute()
+ module.fail_json(msg=error)
+
+ yb = yum.YumBase() # pylint: disable=invalid-name
+
+ # search for package versions available for aos pkgs
+ expected_pkgs = [
+ 'atomic-openshift',
+ 'atomic-openshift-master',
+ 'atomic-openshift-node',
+ ]
+ try:
+ pkgs = yb.pkgSack.returnPackages(patterns=expected_pkgs)
+ except yum.Errors.PackageSackError as e: # pylint: disable=invalid-name
+ # you only hit this if *none* of the packages are available
+ bail('Unable to find any atomic-openshift packages. \nCheck your subscription and repo settings. \n%s' % e)
+
+ # determine what level of precision we're expecting for the version
+ expected_version = module.params['version']
+ if expected_version.startswith('v'): # v3.3 => 3.3
+ expected_version = expected_version[1:]
+ num_dots = expected_version.count('.')
+
+ pkgs_by_name_version = {}
+ pkgs_precise_version_found = {}
+ for pkg in pkgs:
+ # get expected version precision
+ match_version = '.'.join(pkg.version.split('.')[:num_dots + 1])
+ if match_version == expected_version:
+ pkgs_precise_version_found[pkg.name] = True
+ # get x.y version precision
+ minor_version = '.'.join(pkg.version.split('.')[:2])
+ if pkg.name not in pkgs_by_name_version:
+ pkgs_by_name_version[pkg.name] = {}
+ pkgs_by_name_version[pkg.name][minor_version] = True
+
+ # see if any packages couldn't be found at requested version
+ # see if any packages are available in more than one minor version
+ not_found = []
+ multi_found = []
+ for name in expected_pkgs:
+ if name not in pkgs_precise_version_found:
+ not_found.append(name)
+ if name in pkgs_by_name_version and len(pkgs_by_name_version[name]) > 1:
+ multi_found.append(name)
+ if not_found:
+        msg = 'Not all of the required packages are available at the requested version %s:\n' % expected_version
+ for name in not_found:
+ msg += ' %s\n' % name
+ bail(msg + 'Please check your subscriptions and enabled repositories.')
+ if multi_found:
+ msg = 'Multiple minor versions of these packages are available\n'
+ for name in multi_found:
+ msg += ' %s\n' % name
+ bail(msg + "There should only be one OpenShift version's repository enabled at a time.")
+
+ # _unmute()
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/openshift_preflight/base/library/check_yum_update.py b/roles/openshift_preflight/base/library/check_yum_update.py
new file mode 100755
index 000000000..296ebd44f
--- /dev/null
+++ b/roles/openshift_preflight/base/library/check_yum_update.py
@@ -0,0 +1,116 @@
+#!/usr/bin/python
+# vim: expandtab:tabstop=4:shiftwidth=4
+'''
+Ansible module to test whether a yum update or install will succeed,
+without actually performing it or running yum.
+parameters:
+ packages: (optional) A list of package names to install or update.
+ If omitted, all installed RPMs are considered for updates.
+'''
+
+# import os
+import sys
+import yum # pylint: disable=import-error
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main(): # pylint: disable=missing-docstring,too-many-branches
+ module = AnsibleModule(
+ argument_spec=dict(
+ packages=dict(type='list', default=[])
+ ),
+ supports_check_mode=True
+ )
+
+ # NOTE(rhcarvalho): sosiouxme added _unmute, but I couldn't find a case yet
+ # for when it is actually necessary. Leaving it commented out for now,
+ # though this comment and the commented out code related to _unmute should
+ # be deleted later if not proven necessary.
+
+ # sys.stdout = os.devnull # mute yum so it doesn't break our output
+
+ # def _unmute(): # pylint: disable=missing-docstring
+ # sys.stdout = sys.__stdout__
+
+ def bail(error): # pylint: disable=missing-docstring
+ # _unmute()
+ module.fail_json(msg=error)
+
+ yb = yum.YumBase() # pylint: disable=invalid-name
+ # determine if the existing yum configuration is valid
+ try:
+ yb.repos.populateSack(mdtype='metadata', cacheonly=1)
+ # for error of type:
+ # 1. can't reach the repo URL(s)
+ except yum.Errors.NoMoreMirrorsRepoError as e: # pylint: disable=invalid-name
+ bail('Error getting data from at least one yum repository: %s' % e)
+ # 2. invalid repo definition
+ except yum.Errors.RepoError as e: # pylint: disable=invalid-name
+ bail('Error with yum repository configuration: %s' % e)
+ # 3. other/unknown
+ # * just report the problem verbatim
+ except: # pylint: disable=bare-except; # noqa
+ bail('Unexpected error with yum repository: %s' % sys.exc_info()[1])
+
+ packages = module.params['packages']
+ no_such_pkg = []
+ for pkg in packages:
+ try:
+ yb.install(name=pkg)
+ except yum.Errors.InstallError as e: # pylint: disable=invalid-name
+ no_such_pkg.append(pkg)
+ except: # pylint: disable=bare-except; # noqa
+ bail('Unexpected error with yum install/update: %s' %
+ sys.exc_info()[1])
+ if not packages:
+ # no packages requested means test a yum update of everything
+ yb.update()
+ elif no_such_pkg:
+ # wanted specific packages to install but some aren't available
+ user_msg = 'Cannot install all of the necessary packages. Unavailable:\n'
+ for pkg in no_such_pkg:
+ user_msg += ' %s\n' % pkg
+ user_msg += 'You may need to enable one or more yum repositories to make this content available.'
+ bail(user_msg)
+
+ try:
+ txn_result, txn_msgs = yb.buildTransaction()
+ except: # pylint: disable=bare-except; # noqa
+ bail('Unexpected error during dependency resolution for yum update: \n %s' %
+ sys.exc_info()[1])
+
+ # find out if there are any errors with the update/install
+ if txn_result == 0: # 'normal exit' meaning there's nothing to install/update
+ pass
+ elif txn_result == 1: # error with transaction
+ user_msg = 'Could not perform a yum update.\n'
+ if len(txn_msgs) > 0:
+ user_msg += 'Errors from dependency resolution:\n'
+ for msg in txn_msgs:
+ user_msg += ' %s\n' % msg
+ user_msg += 'You should resolve these issues before proceeding with an install.\n'
+ user_msg += 'You may need to remove or downgrade packages or enable/disable yum repositories.'
+ bail(user_msg)
+ # TODO: it would be nice depending on the problem:
+ # 1. dependency for update not found
+ # * construct the dependency tree
+ # * find the installed package(s) that required the missing dep
+ # * determine if any of these packages matter to openshift
+ # * build helpful error output
+ # 2. conflicts among packages in available content
+ # * analyze dependency tree and build helpful error output
+ # 3. other/unknown
+ # * report the problem verbatim
+ # * add to this list as we come across problems we can clearly diagnose
+ elif txn_result == 2: # everything resolved fine
+ pass
+ else:
+ bail('Unknown error(s) from dependency resolution. Exit Code: %d:\n%s' %
+ (txn_result, txn_msgs))
+
+ # _unmute()
+ module.exit_json(changed=False)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/roles/openshift_preflight/common/meta/main.yml b/roles/openshift_preflight/common/meta/main.yml
new file mode 100644
index 000000000..6f23cbf3b
--- /dev/null
+++ b/roles/openshift_preflight/common/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - role: openshift_preflight/base
diff --git a/roles/openshift_preflight/common/tasks/main.yml b/roles/openshift_preflight/common/tasks/main.yml
new file mode 100644
index 000000000..f1a4a160e
--- /dev/null
+++ b/roles/openshift_preflight/common/tasks/main.yml
@@ -0,0 +1,21 @@
+---
+# check content available on all hosts
+- when: not openshift.common.is_containerized | bool
+ block:
+
+ - name: determine if yum update will work
+ action: check_yum_update
+ register: r
+
+ - set_fact:
+ oo_preflight_check_results: "{{ oo_preflight_check_results + [r|combine({'_task': 'determine if yum update will work'})] }}"
+
+ - name: determine if expected version matches what is available
+ aos_version:
+ version: "{{ openshift_release }}"
+ when:
+ - deployment_type == "openshift-enterprise"
+ register: r
+
+ - set_fact:
+ oo_preflight_check_results: "{{ oo_preflight_check_results + [r|combine({'_task': 'determine if expected version matches what is available'})] }}"
diff --git a/roles/openshift_preflight/init/meta/main.yml b/roles/openshift_preflight/init/meta/main.yml
new file mode 100644
index 000000000..0bbeadd34
--- /dev/null
+++ b/roles/openshift_preflight/init/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - role: openshift_facts
diff --git a/roles/openshift_preflight/init/tasks/main.yml b/roles/openshift_preflight/init/tasks/main.yml
new file mode 100644
index 000000000..bf2d82196
--- /dev/null
+++ b/roles/openshift_preflight/init/tasks/main.yml
@@ -0,0 +1,4 @@
+---
+- name: set common variables
+ set_fact:
+ oo_preflight_check_results: "{{ oo_preflight_check_results | default([]) }}"
diff --git a/roles/openshift_preflight/masters/meta/main.yml b/roles/openshift_preflight/masters/meta/main.yml
new file mode 100644
index 000000000..6f23cbf3b
--- /dev/null
+++ b/roles/openshift_preflight/masters/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - role: openshift_preflight/base
diff --git a/roles/openshift_preflight/masters/tasks/main.yml b/roles/openshift_preflight/masters/tasks/main.yml
new file mode 100644
index 000000000..35fb1e3ca
--- /dev/null
+++ b/roles/openshift_preflight/masters/tasks/main.yml
@@ -0,0 +1,31 @@
+---
+# determine if yum install of master pkgs will work
+- when: not openshift.common.is_containerized | bool
+ block:
+
+ - name: main master packages availability
+ check_yum_update:
+ packages:
+ - "{{ openshift.common.service_type }}"
+ - "{{ openshift.common.service_type }}-clients"
+ - "{{ openshift.common.service_type }}-master"
+ register: r
+
+ - set_fact:
+ oo_preflight_check_results: "{{ oo_preflight_check_results + [r|combine({'_task': 'main master packages availability'})] }}"
+
+ - name: other master packages availability
+ check_yum_update:
+ packages:
+ - etcd
+ - bash-completion
+ - cockpit-bridge
+ - cockpit-docker
+ - cockpit-kubernetes
+ - cockpit-shell
+ - cockpit-ws
+ - httpd-tools
+ register: r
+
+ - set_fact:
+ oo_preflight_check_results: "{{ oo_preflight_check_results + [r|combine({'_task': 'other master packages availability'})] }}"
diff --git a/roles/openshift_preflight/nodes/meta/main.yml b/roles/openshift_preflight/nodes/meta/main.yml
new file mode 100644
index 000000000..6f23cbf3b
--- /dev/null
+++ b/roles/openshift_preflight/nodes/meta/main.yml
@@ -0,0 +1,3 @@
+---
+dependencies:
+ - role: openshift_preflight/base
diff --git a/roles/openshift_preflight/nodes/tasks/main.yml b/roles/openshift_preflight/nodes/tasks/main.yml
new file mode 100644
index 000000000..a10e69024
--- /dev/null
+++ b/roles/openshift_preflight/nodes/tasks/main.yml
@@ -0,0 +1,41 @@
+---
+# determine if yum install of node pkgs will work
+- when: not openshift.common.is_containerized | bool
+ block:
+
+ - name: main node packages availability
+ check_yum_update:
+ packages:
+ - "{{ openshift.common.service_type }}"
+ - "{{ openshift.common.service_type }}-node"
+ - "{{ openshift.common.service_type }}-sdn-ovs"
+ register: r
+
+ - set_fact:
+ oo_preflight_check_results: "{{ oo_preflight_check_results + [r|combine({'_task': 'main node packages availability'})] }}"
+
+ - name: other node packages availability
+ check_yum_update:
+ packages:
+ - docker
+ - PyYAML
+ - firewalld
+ - iptables
+ - iptables-services
+ - nfs-utils
+ - ntp
+ - yum-utils
+ - dnsmasq
+ - libselinux-python
+ - ceph-common
+ - glusterfs-fuse
+ - iscsi-initiator-utils
+ - pyparted
+ - python-httplib2
+ - openssl
+ - flannel
+ - bind
+ register: r
+
+ - set_fact:
+ oo_preflight_check_results: "{{ oo_preflight_check_results + [r|combine({'_task': 'other node packages availability'})] }}"
diff --git a/roles/openshift_preflight/verify_status/callback_plugins/zz_failure_summary.py b/roles/openshift_preflight/verify_status/callback_plugins/zz_failure_summary.py
new file mode 100644
index 000000000..180ed8d8f
--- /dev/null
+++ b/roles/openshift_preflight/verify_status/callback_plugins/zz_failure_summary.py
@@ -0,0 +1,96 @@
+# vim: expandtab:tabstop=4:shiftwidth=4
+'''
+Ansible callback plugin.
+'''
+
+from ansible.plugins.callback import CallbackBase
+from ansible import constants as C
+from ansible.utils.color import stringc
+
+
+class CallbackModule(CallbackBase):
+ '''
+ This callback plugin stores task results and summarizes failures.
+    The file name is prefixed with `zz_` so that Ansible loads this plugin
+    last, making its output the last thing that users see.
+ '''
+
+ CALLBACK_VERSION = 2.0
+ CALLBACK_TYPE = 'aggregate'
+ CALLBACK_NAME = 'failure_summary'
+ CALLBACK_NEEDS_WHITELIST = False
+
+ def __init__(self):
+ super(CallbackModule, self).__init__()
+ self.__failures = []
+
+ def v2_runner_on_failed(self, result, ignore_errors=False):
+ super(CallbackModule, self).v2_runner_on_failed(result, ignore_errors)
+ self.__failures.append(dict(result=result, ignore_errors=ignore_errors))
+
+ def v2_playbook_on_stats(self, stats):
+ super(CallbackModule, self).v2_playbook_on_stats(stats)
+ # TODO: update condition to consider a host var or env var to
+ # enable/disable the summary, so that we can control the output from a
+ # play.
+ if self.__failures:
+ self._print_failure_summary()
+
+ def _print_failure_summary(self):
+ '''Print a summary of failed tasks (including ignored failures).'''
+ self._display.display(u'\nFailure summary:\n')
+
+ # TODO: group failures by host or by task. If grouped by host, it is
+ # easy to see all problems of a given host. If grouped by task, it is
+        # easy to see which hosts need the same fix.
+
+ width = len(str(len(self.__failures)))
+ initial_indent_format = u' {{:>{width}}}. '.format(width=width)
+ initial_indent_len = len(initial_indent_format.format(0))
+ subsequent_indent = u' ' * initial_indent_len
+ subsequent_extra_indent = u' ' * (initial_indent_len + 10)
+
+ for i, failure in enumerate(self.__failures, 1):
+ lines = _format_failure(failure)
+ self._display.display(u'\n{}{}'.format(initial_indent_format.format(i), lines[0]))
+ for line in lines[1:]:
+ line = line.replace(u'\n', u'\n' + subsequent_extra_indent)
+ indented = u'{}{}'.format(subsequent_indent, line)
+ self._display.display(indented)
+
+
+# Reason: disable pylint protected-access because we need to access _*
+# attributes of a task result to implement this method.
+# Status: permanently disabled unless Ansible's API changes.
+# pylint: disable=protected-access
+def _format_failure(failure):
+ '''Return a list of pretty-formatted lines describing a failure, including
+ relevant information about it. Line separators are not included.'''
+ result = failure['result']
+ host = result._host.get_name()
+ play = _get_play(result._task)
+ if play:
+ play = play.get_name()
+ task = result._task.get_name()
+ msg = result._result.get('msg', u'???')
+ rows = (
+ (u'Host', host),
+ (u'Play', play),
+ (u'Task', task),
+ (u'Message', stringc(msg, C.COLOR_ERROR)),
+ )
+ row_format = '{:10}{}'
+ return [row_format.format(header + u':', body) for header, body in rows]
+
+
+# Reason: disable pylint protected-access because we need to access _*
+# attributes of obj to implement this function.
+# This is inspired by ansible.playbook.base.Base.dump_me.
+# Status: permanently disabled unless Ansible's API changes.
+# pylint: disable=protected-access
+def _get_play(obj):
+ '''Given a task or block, recursively tries to find its parent play.'''
+ if hasattr(obj, '_play'):
+ return obj._play
+    if getattr(obj, '_parent', None):
+ return _get_play(obj._parent)
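
A standalone sketch of the row layout produced by `_format_failure`; the host, play, task, and message values below are hypothetical, whereas in the plugin they are read from the Ansible task result object:

```python
# Standalone sketch of the row layout produced by _format_failure above.
# The host/play/task/message values are hypothetical.
rows = (
    (u'Host', u'master1.example.com'),
    (u'Play', u'Verify prerequisites'),
    (u'Task', u'determine if yum update will work'),
    (u'Message', u'Could not perform a yum update.'),
)
row_format = '{:10}{}'
for header, body in rows:
    print(row_format.format(header + u':', body))
# Host:     master1.example.com
# Play:     Verify prerequisites
# ...
```
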
diff --git a/roles/openshift_preflight/verify_status/tasks/main.yml b/roles/openshift_preflight/verify_status/tasks/main.yml
new file mode 100644
index 000000000..36ccf648a
--- /dev/null
+++ b/roles/openshift_preflight/verify_status/tasks/main.yml
@@ -0,0 +1,8 @@
+---
+- name: find check failures
+ set_fact:
+ oo_preflight_check_failures: "{{ oo_preflight_check_results | select('failed', 'equalto', True) | list }}"
+
+- name: ensure all checks succeed
+ action: fail
+ when: oo_preflight_check_failures
diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml
index d5ed9c09d..23dcd0440 100644
--- a/roles/openshift_repos/tasks/main.yaml
+++ b/roles/openshift_repos/tasks/main.yaml
@@ -37,7 +37,7 @@
when: ansible_os_family == "RedHat" and ansible_distribution != "Fedora"
and openshift_deployment_type == 'origin'
and not openshift.common.is_containerized | bool
- and openshift_enable_origin_repo | default(true)
+ and openshift_enable_origin_repo | default(true) | bool
- name: Configure origin yum repositories RHEL/CentOS
copy:
@@ -47,4 +47,4 @@
when: ansible_os_family == "RedHat" and ansible_distribution != "Fedora"
and openshift_deployment_type == 'origin'
and not openshift.common.is_containerized | bool
- and openshift_enable_origin_repo | default(true)
+ and openshift_enable_origin_repo | default(true) | bool
diff --git a/roles/openshift_repos/templates/yum_repo.j2 b/roles/openshift_repos/templates/yum_repo.j2
index 2d9243545..ef2cd6603 100644
--- a/roles/openshift_repos/templates/yum_repo.j2
+++ b/roles/openshift_repos/templates/yum_repo.j2
@@ -2,9 +2,9 @@
[{{ repo.id }}]
name={{ repo.name | default(repo.id) }}
baseurl={{ repo.baseurl }}
-{% set enable_repo = repo.enabled | default('1') %}
+{% set enable_repo = repo.enabled | default(1) %}
enabled={{ 1 if ( enable_repo == 1 or enable_repo == True ) else 0 }}
-{% set enable_gpg_check = repo.gpgcheck | default('1') %}
+{% set enable_gpg_check = repo.gpgcheck | default(1) %}
gpgcheck={{ 1 if ( enable_gpg_check == 1 or enable_gpg_check == True ) else 0 }}
{% for key, value in repo.iteritems() %}
{% if key not in ['id', 'name', 'baseurl', 'enabled', 'gpgcheck'] and value is defined %}
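
A quick Jinja2 check of why the integer default matters here: with `default('1')`, a repo entry that omits `enabled` yields the string `'1'`, which fails the `== 1` comparison and renders `enabled=0`. The repo dicts below are hypothetical:

```python
# The repo dicts below are hypothetical; Jinja2 is the engine Ansible uses
# to render this template.
from jinja2 import Template

snippet = ("{% set enable_repo = repo.enabled | default(1) %}"
           "enabled={{ 1 if (enable_repo == 1 or enable_repo == True) else 0 }}")

print(Template(snippet).render(repo={}))                  # enabled=1 (default applies)
print(Template(snippet).render(repo={'enabled': False}))  # enabled=0
```
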
diff --git a/roles/openshift_storage_nfs_lvm/README.md b/roles/openshift_storage_nfs_lvm/README.md
index 8b8471745..cc674d3fd 100644
--- a/roles/openshift_storage_nfs_lvm/README.md
+++ b/roles/openshift_storage_nfs_lvm/README.md
@@ -48,6 +48,13 @@ osnl_volume_num_start: 3
# How many volumes/partitions to build, with the size we stated.
osnl_number_of_volumes: 2
+# osnl_volume_reclaim_policy
+# Volume reclaim policy of a PersistentVolume tells the cluster
+# what to do with the volume after it is released.
+#
+# Valid values are "Retain" or "Recycle" (default).
+osnl_volume_reclaim_policy: "Recycle"
+
```
## Dependencies
@@ -71,6 +78,7 @@ exported via NFS. json files are created in /root.
osnl_volume_size: 5
osnl_volume_num_start: 3
osnl_number_of_volumes: 2
+ osnl_volume_reclaim_policy: "Recycle"
## Full example
@@ -96,6 +104,7 @@ exported via NFS. json files are created in /root.
osnl_volume_size: 5
osnl_volume_num_start: 3
osnl_number_of_volumes: 2
+ osnl_volume_reclaim_policy: "Recycle"
* Run the playbook:
```
diff --git a/roles/openshift_storage_nfs_lvm/defaults/main.yml b/roles/openshift_storage_nfs_lvm/defaults/main.yml
index f81cdc724..48352187c 100644
--- a/roles/openshift_storage_nfs_lvm/defaults/main.yml
+++ b/roles/openshift_storage_nfs_lvm/defaults/main.yml
@@ -8,3 +8,10 @@ osnl_mount_dir: /exports/openshift
# Volume Group to use.
osnl_volume_group: openshiftvg
+
+# Volume reclaim policy of a PersistentVolume tells the cluster
+# what to do with the volume after it is released.
+#
+# Valid values are "Retain" or "Recycle".
+# See https://docs.openshift.com/enterprise/3.0/architecture/additional_concepts/storage.html#pv-recycling-policy
+osnl_volume_reclaim_policy: "Recycle"
diff --git a/roles/openshift_storage_nfs_lvm/meta/main.yml b/roles/openshift_storage_nfs_lvm/meta/main.yml
index ea7c9bb45..50d94f6a3 100644
--- a/roles/openshift_storage_nfs_lvm/meta/main.yml
+++ b/roles/openshift_storage_nfs_lvm/meta/main.yml
@@ -14,4 +14,5 @@ galaxy_info:
- all
categories:
- openshift
-dependencies: []
+dependencies:
+- role: openshift_facts
diff --git a/roles/openshift_storage_nfs_lvm/tasks/main.yml b/roles/openshift_storage_nfs_lvm/tasks/main.yml
index ea0cc2a94..49dd657b5 100644
--- a/roles/openshift_storage_nfs_lvm/tasks/main.yml
+++ b/roles/openshift_storage_nfs_lvm/tasks/main.yml
@@ -2,7 +2,7 @@
# TODO -- this may actually work on atomic hosts
- fail:
msg: "openshift_storage_nfs_lvm is not compatible with atomic host"
- when: openshift.common.is_atomic | true
+ when: openshift.common.is_atomic | bool
- name: Create lvm volumes
lvol: vg={{osnl_volume_group}} lv={{ item }} size={{osnl_volume_size}}G
diff --git a/roles/openshift_storage_nfs_lvm/templates/nfs.json.j2 b/roles/openshift_storage_nfs_lvm/templates/nfs.json.j2
index 3c4d2f56c..c273aca9f 100644
--- a/roles/openshift_storage_nfs_lvm/templates/nfs.json.j2
+++ b/roles/openshift_storage_nfs_lvm/templates/nfs.json.j2
@@ -12,10 +12,10 @@
"storage": "{{ osnl_volume_size }}Gi"
},
"accessModes": [ "ReadWriteOnce", "ReadWriteMany" ],
- "persistentVolumeReclaimPolicy": "Recycle",
+ "persistentVolumeReclaimPolicy": "{{ osnl_volume_reclaim_policy }}",
"nfs": {
- "Server": "{{ inventory_hostname }}",
- "Path": "{{ osnl_mount_dir }}/{{ item }}"
+ "server": "{{ inventory_hostname }}",
+ "path": "{{ osnl_mount_dir }}/{{ item }}"
}
}
}
diff --git a/roles/openshift_version/tasks/set_version_containerized.yml b/roles/openshift_version/tasks/set_version_containerized.yml
index 718537287..cd0f20ae9 100644
--- a/roles/openshift_version/tasks/set_version_containerized.yml
+++ b/roles/openshift_version/tasks/set_version_containerized.yml
@@ -1,8 +1,9 @@
---
- name: Set containerized version to configure if openshift_image_tag specified
set_fact:
- # Expects a leading "v" in inventory, strip it off here:
- openshift_version: "{{ openshift_image_tag[1:].split('-')[0] }}"
+ # Expects a leading "v" in inventory, strip it off here unless
+ # openshift_image_tag=latest
+ openshift_version: "{{ openshift_image_tag[1:].split('-')[0] if openshift_image_tag != 'latest' else openshift_image_tag }}"
when: openshift_image_tag is defined and openshift_version is not defined
- name: Set containerized version to configure if openshift_release specified
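
A small sketch of the tag-to-version logic introduced above, using hypothetical image tags:

```python
# Hypothetical tags; the real values come from openshift_image_tag in inventory.
def containerized_version(openshift_image_tag):
    """Strip the leading 'v' and any release suffix, except for 'latest'."""
    if openshift_image_tag == 'latest':
        return openshift_image_tag
    return openshift_image_tag[1:].split('-')[0]

print(containerized_version('v3.4.0.39-2'))  # -> 3.4.0.39
print(containerized_version('latest'))       # -> latest
```
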
diff --git a/roles/os_firewall/README.md b/roles/os_firewall/README.md
index c13c5dfc9..43db3cc74 100644
--- a/roles/os_firewall/README.md
+++ b/roles/os_firewall/README.md
@@ -4,6 +4,9 @@ OS Firewall
OS Firewall manages firewalld and iptables firewall settings for a minimal use
case (Adding/Removing rules based on protocol and port number).
+Note: firewalld is not supported on Atomic Host
+https://bugzilla.redhat.com/show_bug.cgi?id=1403331
+
Requirements
------------
@@ -14,7 +17,7 @@ Role Variables
| Name | Default | |
|---------------------------|---------|----------------------------------------|
-| os_firewall_use_firewalld | False | If false, use iptables |
+| os_firewall_use_firewalld | True | If false, use iptables |
| os_firewall_allow | [] | List of service,port mappings to allow |
| os_firewall_deny | [] | List of service, port mappings to deny |
@@ -31,6 +34,7 @@ Use iptables and open tcp ports 80 and 443:
---
- hosts: servers
vars:
+ os_firewall_use_firewalld: false
os_firewall_allow:
- service: httpd
port: 80/tcp
@@ -45,7 +49,6 @@ Use firewalld and open tcp port 443 and close previously open tcp port 80:
---
- hosts: servers
vars:
- os_firewall_use_firewalld: true
os_firewall_allow:
- service: https
port: 443/tcp
diff --git a/roles/os_firewall/defaults/main.yml b/roles/os_firewall/defaults/main.yml
index c870a301a..4c544122f 100644
--- a/roles/os_firewall/defaults/main.yml
+++ b/roles/os_firewall/defaults/main.yml
@@ -1,9 +1,7 @@
---
os_firewall_enabled: True
-# TODO: Upstream kubernetes only supports iptables currently
-# TODO: it might be possible to still use firewalld if we wire up the created
-# chains with the public zone (or the zone associated with the correct
-# interfaces)
-os_firewall_use_firewalld: False
+# firewalld is not supported on Atomic Host
+# https://bugzilla.redhat.com/show_bug.cgi?id=1403331
+os_firewall_use_firewalld: "{{ False if openshift.common.is_atomic | bool else True }}"
os_firewall_allow: []
os_firewall_deny: []
diff --git a/roles/os_firewall/library/os_firewall_manage_iptables.py b/roles/os_firewall/library/os_firewall_manage_iptables.py
index b60e52dfe..8d4878fa7 100755
--- a/roles/os_firewall/library/os_firewall_manage_iptables.py
+++ b/roles/os_firewall/library/os_firewall_manage_iptables.py
@@ -127,9 +127,17 @@ class IpTablesManager(object): # pylint: disable=too-many-instance-attributes
check_cmd = self.cmd + ['-C'] + rule
return True if subprocess.call(check_cmd) == 0 else False
+ @staticmethod
+ def port_as_argument(port):
+ if isinstance(port, int):
+ return str(port)
+ if isinstance(port, basestring): # noqa: F405
+ return port.replace('-', ":")
+ return port
+
def gen_rule(self, port, proto):
return [self.chain, '-p', proto, '-m', 'state', '--state', 'NEW',
- '-m', proto, '--dport', str(port), '-j', 'ACCEPT']
+ '-m', proto, '--dport', IpTablesManager.port_as_argument(port), '-j', 'ACCEPT']
def create_jump(self):
if self.check_mode:
@@ -215,7 +223,9 @@ class IpTablesManager(object): # pylint: disable=too-many-instance-attributes
def gen_cmd(self):
cmd = 'iptables' if self.ip_version == 'ipv4' else 'ip6tables'
- return ["/usr/sbin/%s" % cmd]
+ # Include -w (wait for xtables lock) in default arguments.
+ default_args = ['-w']
+ return ["/usr/sbin/%s" % cmd] + default_args
def gen_save_cmd(self): # pylint: disable=no-self-use
return ['/usr/libexec/iptables/iptables.init', 'save']
@@ -231,7 +241,7 @@ def main():
create_jump_rule=dict(required=False, type='bool', default=True),
jump_rule_chain=dict(required=False, default='INPUT'),
protocol=dict(required=False, choices=['tcp', 'udp']),
- port=dict(required=False, type='int'),
+ port=dict(required=False, type='str'),
ip_version=dict(required=False, default='ipv4',
choices=['ipv4', 'ipv6']),
),
diff --git a/roles/os_firewall/tasks/firewall/firewalld.yml b/roles/os_firewall/tasks/firewall/firewalld.yml
index 1101870be..c4db197ca 100644
--- a/roles/os_firewall/tasks/firewall/firewalld.yml
+++ b/roles/os_firewall/tasks/firewall/firewalld.yml
@@ -1,7 +1,8 @@
---
- name: Install firewalld packages
- package: name=firewalld state=present
- when: not openshift.common.is_containerized | bool
+ package:
+ name: firewalld
+ state: present
- name: Ensure iptables services are not enabled
systemd:
diff --git a/roles/os_firewall/tasks/main.yml b/roles/os_firewall/tasks/main.yml
index 076e5e311..20efe5b0d 100644
--- a/roles/os_firewall/tasks/main.yml
+++ b/roles/os_firewall/tasks/main.yml
@@ -1,4 +1,10 @@
---
+- name: Assert - Do not use firewalld on Atomic Host
+ assert:
+ that: not os_firewall_use_firewalld | bool
+ msg: "Firewalld is not supported on Atomic Host"
+ when: openshift.common.is_atomic | bool
+
- include: firewall/firewalld.yml
when: os_firewall_enabled | bool and os_firewall_use_firewalld | bool
diff --git a/roles/rhel_subscribe/tasks/main.yml b/roles/rhel_subscribe/tasks/main.yml
index ba3b9a923..28c3c7080 100644
--- a/roles/rhel_subscribe/tasks/main.yml
+++ b/roles/rhel_subscribe/tasks/main.yml
@@ -27,6 +27,11 @@
creates: /etc/rhsm/ca/katello-server-ca.pem
when: rhel_subscription_server is defined and rhel_subscription_server
+- name: Install Red Hat Subscription manager
+ yum:
+ name: subscription-manager
+ state: present
+
- name: RedHat subscriptions
redhat_subscription:
username: "{{ rhel_subscription_user }}"
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 000000000..06346852c
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,27 @@
+[bdist_wheel]
+# This flag says that the code is written to work on both Python 2 and Python
+# 3. If at all possible, it is good practice to do this. If you cannot, you
+# will need to generate wheels for each Python version that you support.
+universal=1
+
+[nosetests]
+tests=roles/openshift_master_facts/test/, test/
+verbosity=2
+with-coverage=1
+cover-html=1
+cover-inclusive=1
+cover-min-percentage=70
+cover-erase=1
+detailed-errors=1
+cover-branches=1
+
+[yamllint]
+excludes=.tox,utils,files
+
+[lint]
+lint_disable=fixme,locally-disabled,file-ignored,duplicate-code
+
+[flake8]
+exclude=.tox/*,utils/*,inventory/*
+max_line_length = 120
+ignore = E501,T003
diff --git a/setup.py b/setup.py
new file mode 100644
index 000000000..372aca9ff
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,245 @@
+"""A setuptools based setup module.
+
+"""
+from __future__ import print_function
+
+import os
+import fnmatch
+import re
+import sys
+import yaml
+
+# Always prefer setuptools over distutils
+from setuptools import setup, Command
+from setuptools_lint.setuptools_command import PylintCommand
+from six import string_types
+from yamllint.config import YamlLintConfig
+from yamllint.cli import Format
+from yamllint import linter
+
+
+def find_files(base_dir, exclude_dirs, include_dirs, file_regex):
+ ''' find files matching file_regex '''
+ found = []
+ exclude_regex = ''
+ include_regex = ''
+
+ if exclude_dirs is not None:
+ exclude_regex = r'|'.join([fnmatch.translate(x) for x in exclude_dirs]) or r'$.'
+
+ if include_dirs is not None:
+ include_regex = r'|'.join([fnmatch.translate(x) for x in include_dirs]) or r'$.'
+
+ for root, dirs, files in os.walk(base_dir):
+ if exclude_dirs is not None:
+ # filter out excludes for dirs
+ dirs[:] = [d for d in dirs if not re.match(exclude_regex, d)]
+
+ if include_dirs is not None:
+ # filter for includes for dirs
+ dirs[:] = [d for d in dirs if re.match(include_regex, d)]
+
+ matches = [os.path.join(root, f) for f in files if re.search(file_regex, f) is not None]
+ found.extend(matches)
+
+ return found
+
+
+class OpenShiftAnsibleYamlLint(Command):
+ ''' Command to run yamllint '''
+ description = "Run yamllint tests"
+ user_options = [
+ ('excludes=', 'e', 'directories to exclude'),
+ ('config-file=', 'c', 'config file to use'),
+ ('format=', 'f', 'format to use (standard, parsable)'),
+ ]
+
+ def initialize_options(self):
+ ''' initialize_options '''
+ # Reason: Defining these attributes as a part of initialize_options is
+ # consistent with upstream usage
+ # Status: permanently disabled
+ # pylint: disable=attribute-defined-outside-init
+ self.excludes = None
+ self.config_file = None
+ self.format = None
+
+ def finalize_options(self):
+ ''' finalize_options '''
+ # Reason: These attributes are defined in initialize_options and this
+        # usage is consistent with upstream usage
+ # Status: permanently disabled
+ # pylint: disable=attribute-defined-outside-init
+ if isinstance(self.excludes, string_types):
+ self.excludes = self.excludes.split(',')
+ if self.format is None:
+ self.format = 'standard'
+ assert (self.format in ['standard', 'parsable']), (
+ 'unknown format {0}.'.format(self.format))
+ if self.config_file is None:
+ self.config_file = '.yamllint'
+ assert os.path.isfile(self.config_file), (
+ 'yamllint config file {0} does not exist.'.format(self.config_file))
+
+ def run(self):
+ ''' run command '''
+ if self.excludes is not None:
+ print("Excludes:\n{0}".format(yaml.dump(self.excludes, default_flow_style=False)))
+
+ config = YamlLintConfig(file=self.config_file)
+
+ has_errors = False
+ has_warnings = False
+
+ if self.format == 'parsable':
+ format_method = Format.parsable
+ else:
+ format_method = Format.standard_color
+
+ for yaml_file in find_files(os.getcwd(), self.excludes, None, r'\.ya?ml$'):
+ first = True
+ with open(yaml_file, 'r') as contents:
+ for problem in linter.run(contents, config):
+ if first and self.format != 'parsable':
+ print('\n{0}:'.format(os.path.relpath(yaml_file)))
+ first = False
+
+ print(format_method(problem, yaml_file))
+ if problem.level == linter.PROBLEM_LEVELS[2]:
+ has_errors = True
+ elif problem.level == linter.PROBLEM_LEVELS[1]:
+ has_warnings = True
+
+ if has_errors or has_warnings:
+            print('yamllint issues found')
+ raise SystemExit(1)
+
+
+class OpenShiftAnsiblePylint(PylintCommand):
+ ''' Class to override the default behavior of PylintCommand '''
+
+ # Reason: This method needs to be an instance method to conform to the
+ # overridden method's signature
+ # Status: permanently disabled
+ # pylint: disable=no-self-use
+ def find_all_modules(self):
+ ''' find all python files to test '''
+ exclude_dirs = ['.tox', 'utils', 'test', 'tests', 'git']
+ modules = []
+ for match in find_files(os.getcwd(), exclude_dirs, None, r'\.py$'):
+ package = os.path.basename(match).replace('.py', '')
+ modules.append(('openshift_ansible', package, match))
+ return modules
+
+ def get_finalized_command(self, cmd):
+ ''' override get_finalized_command to ensure we use our
+ find_all_modules method '''
+ if cmd == 'build_py':
+ return self
+
+ # Reason: This method needs to be an instance method to conform to the
+ # overridden method's signature
+ # Status: permanently disabled
+ # pylint: disable=no-self-use
+ def with_project_on_sys_path(self, func, func_args, func_kwargs):
+ ''' override behavior, since we don't need to build '''
+ return func(*func_args, **func_kwargs)
+
+
+class OpenShiftAnsibleGenerateValidation(Command):
+ ''' Command to run generated module validation'''
+ description = "Run generated module validation"
+ user_options = []
+
+ def initialize_options(self):
+ ''' initialize_options '''
+ pass
+
+ def finalize_options(self):
+ ''' finalize_options '''
+ pass
+
+    # Reason: self is not used, but run() must be an instance method for
+    # setuptools to invoke it as a command.
+ # pylint: disable=no-self-use
+ def run(self):
+ ''' run command '''
+ # find the files that call generate
+ generate_files = find_files('roles',
+ ['inventory',
+ 'test',
+ 'playbooks',
+ 'utils'],
+ None,
+ 'generate.py$')
+
+ if len(generate_files) < 1:
+ print('Did not find any code generation. Please verify module code generation.') # noqa: E501
+ raise SystemExit(1)
+
+ errors = False
+ for gen in generate_files:
+ print('Checking generated module code: {0}'.format(gen))
+ try:
+ sys.path.insert(0, os.path.dirname(gen))
+ # we are importing dynamically. This isn't in
+ # the python path.
+ # pylint: disable=import-error
+ import generate
+ generate.verify()
+ except generate.GenerateAnsibleException as gae:
+ print(gae.args)
+ errors = True
+
+ if errors:
+ print('Found errors while generating module code.')
+ raise SystemExit(1)
+
+ print('\nAll generate scripts passed.\n')
+
+
+class UnsupportedCommand(Command):
+ ''' Basic Command to override unsupported commands '''
+ user_options = []
+
+ # Reason: This method needs to be an instance method to conform to the
+ # overridden method's signature
+ # Status: permanently disabled
+ # pylint: disable=no-self-use
+ def initialize_options(self):
+ ''' initialize_options '''
+ pass
+
+ # Reason: This method needs to be an instance method to conform to the
+ # overridden method's signature
+ # Status: permanently disabled
+ # pylint: disable=no-self-use
+ def finalize_options(self):
+        ''' finalize_options '''
+ pass
+
+ # Reason: This method needs to be an instance method to conform to the
+ # overridden method's signature
+ # Status: permanently disabled
+ # pylint: disable=no-self-use
+ def run(self):
+ ''' run command '''
+ print("Unsupported command for openshift-ansible")
+
+
+setup(
+ name='openshift-ansible',
+ license="Apache 2.0",
+ cmdclass={
+ 'install': UnsupportedCommand,
+ 'develop': UnsupportedCommand,
+ 'build': UnsupportedCommand,
+ 'build_py': UnsupportedCommand,
+ 'build_ext': UnsupportedCommand,
+ 'egg_info': UnsupportedCommand,
+ 'sdist': UnsupportedCommand,
+ 'lint': OpenShiftAnsiblePylint,
+ 'yamllint': OpenShiftAnsibleYamlLint,
+ 'generate_validation': OpenShiftAnsibleGenerateValidation,
+ },
+ packages=[],
+)
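
A sketch of how `find_files()` in the setup.py above builds its directory filter: the fnmatch patterns are translated to regexes and OR'ed together, with `'$.'` as a match-nothing fallback when the list is empty. The patterns and directory names below are hypothetical:

```python
# The patterns and directory names below are hypothetical.
import fnmatch
import re

exclude_dirs = ['.tox', 'utils', 'test*']
exclude_regex = r'|'.join(fnmatch.translate(x) for x in exclude_dirs) or r'$.'

dirs = ['roles', '.tox', 'test_data', 'playbooks']
kept = [d for d in dirs if not re.match(exclude_regex, d)]
print(kept)   # -> ['roles', 'playbooks']
```
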
diff --git a/test-requirements.txt b/test-requirements.txt
new file mode 100644
index 000000000..2ee1e657d
--- /dev/null
+++ b/test-requirements.txt
@@ -0,0 +1,11 @@
+six
+pyOpenSSL
+flake8
+flake8-mutable
+flake8-print
+pylint
+setuptools-lint
+PyYAML
+yamllint
+nose
+coverage
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 000000000..4d3594023
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,19 @@
+[tox]
+minversion=2.3.1
+envlist =
+ py{27,35}-ansible22-{pylint,unit,flake8,yamllint,generate_validation}
+skipsdist=True
+skip_missing_interpreters=True
+
+[testenv]
+deps =
+ -rtest-requirements.txt
+ py35-flake8: flake8-bugbear
+ ansible22: ansible~=2.2
+
+commands =
+ flake8: flake8
+ pylint: python setup.py lint
+ yamllint: python setup.py yamllint
+ unit: nosetests
+ generate_validation: python setup.py generate_validation
diff --git a/utils/.coveragerc b/utils/.coveragerc
index 0c870af42..e1d918755 100644
--- a/utils/.coveragerc
+++ b/utils/.coveragerc
@@ -1,4 +1,5 @@
[run]
omit=
*/lib/python*/site-packages/*
+ */lib/python*/*
/usr/*
diff --git a/utils/.pylintrc b/utils/.pylintrc
new file mode 120000
index 000000000..30b33b524
--- /dev/null
+++ b/utils/.pylintrc
@@ -0,0 +1 @@
+../.pylintrc
\ No newline at end of file
diff --git a/utils/Makefile b/utils/Makefile
index c061edd8c..038c31fcf 100644
--- a/utils/Makefile
+++ b/utils/Makefile
@@ -30,11 +30,12 @@ SHORTNAME := ooinstall
# directory of the target file ($@), kinda like `dirname`.
ASCII2MAN = a2x -D $(dir $@) -d manpage -f manpage $<
MANPAGES := docs/man/man1/atomic-openshift-installer.1
-VERSION := 1.3
+# slipped into the manpage template before a2x processing
+VERSION := 1.4
# YAMLFILES: Skipping all '/files/' folders due to conflicting yaml file definitions
-YAMLFILES = $(shell find ../ -name $(VENV) -prune -o \( -name '*.yml' -o -name '*.yaml' \) ! -path "*/files/*" 2>&1)
-PYFILES = $(shell find ../ -name $(VENV) -prune -o -name ooinstall.egg-info -prune -o -name test -prune -o -name "*.py" -print)
+YAMLFILES = $(shell find ../ -name $(VENV) -prune -o -name .tox -prune -o \( -name '*.yml' -o -name '*.yaml' \) ! -path "*/files/*" -print 2>&1)
+PYFILES = $(shell find ../ -name $(VENV) -prune -o -name ooinstall.egg-info -prune -o -name test -prune -o -name .tox -prune -o -name "*.py" -print)
sdist: clean
python setup.py sdist
@@ -45,7 +46,7 @@ clean:
@find . -type f \( -name "*~" -or -name "#*" \) -delete
@rm -fR build dist rpm-build MANIFEST htmlcov .coverage cover ooinstall.egg-info oo-install
@rm -fR $(VENV)
-
+ @rm -fR .tox
# To force a rebuild of the docs run 'touch' on any *.in file under
# docs/man/man1/
@@ -83,37 +84,27 @@ ci-unittests: $(VENV)
@echo "#############################################"
@echo "# Running Unit Tests in virtualenv"
@echo "#############################################"
- . $(VENV)/bin/activate && python setup.py nosetests
+ . $(VENV)/bin/activate && detox -e py27-unit,py35-unit
@echo "VIEW CODE COVERAGE REPORT WITH 'xdg-open cover/index.html' or run 'make viewcover'"
ci-pylint: $(VENV)
@echo "#############################################"
@echo "# Running PyLint Tests in virtualenv"
@echo "#############################################"
- . $(VENV)/bin/activate && python -m pylint --rcfile ../git/.pylintrc $(PYFILES)
-
-ci-yamllint: $(VENV)
- @echo "#############################################"
- @echo "# Running yamllint Tests in virtualenv"
- @echo "#############################################"
- @. $(VENV)/bin/activate && yamllint -c ../git/.yamllint $(YAMLFILES)
-
-ci-list-deps: $(VENV)
- @echo "#############################################"
- @echo "# Listing all pip deps"
- @echo "#############################################"
- . $(VENV)/bin/activate && pip freeze
+ . $(VENV)/bin/activate && detox -e py27-pylint,py35-pylint
ci-flake8: $(VENV)
@echo "#############################################"
@echo "# Running Flake8 Compliance Tests in virtualenv"
@echo "#############################################"
- . $(VENV)/bin/activate && flake8 --config=setup.cfg ../ --exclude="utils,../inventory"
- . $(VENV)/bin/activate && python setup.py flake8
+ . $(VENV)/bin/activate && detox -e py27-flake8,py35-flake8
+
+ci-tox: $(VENV)
+ . $(VENV)/bin/activate && detox
-ci: ci-list-deps ci-unittests ci-flake8 ci-pylint ci-yamllint
+ci: ci-tox
@echo
@echo "##################################################################################"
@echo "VIEW CODE COVERAGE REPORT WITH 'xdg-open cover/index.html' or run 'make viewcover'"
@echo "To clean your test environment run 'make clean'"
- @echo "Other targets you may run with 'make': 'ci-pylint', 'ci-unittests', 'ci-flake8', 'ci-yamllint'"
+ @echo "Other targets you may run with 'make': 'ci-pylint', 'ci-tox', 'ci-unittests', 'ci-flake8'"
diff --git a/utils/README.md b/utils/README.md
index 2abf2705e..c37ab41e6 100644
--- a/utils/README.md
+++ b/utils/README.md
@@ -6,6 +6,47 @@ Run the command:
to run an array of unittests locally.
+Underneath the covers, we use [tox](http://readthedocs.org/docs/tox/) to manage virtualenvs and run
+tests. Alternatively, tests can be run using [detox](https://pypi.python.org/pypi/detox/), which allows
+for running tests in parallel.
+
+
+```
+pip install tox detox
+```
+
+List the test environments available:
+```
+tox -l
+```
+
+Run all of the tests with:
+```
+tox
+```
+
+Run all of the tests in parallel with detox:
+```
+detox
+```
+
+Running a particular test environment (python 2.7 flake8 tests in this case):
+```
+tox -e py27-ansible22-flake8
+```
+
+Running a particular test environment in a clean virtualenv (python 3.5 pylint
+tests in this case):
+```
+tox -r -e py35-ansible22-pylint
+```
+
+If you want to enter the virtualenv created by tox to do additional
+testing/debugging (py27-flake8 env in this case):
+```
+source .tox/py27-ansible22-flake8/bin/activate
+```
+
You will get errors if the log files already exist and can not be
written to by the current user (`/tmp/ansible.log` and
`/tmp/installer.txt`). *We're working on it.*
diff --git a/utils/docs/man/man1/atomic-openshift-installer.1 b/utils/docs/man/man1/atomic-openshift-installer.1
index 072833ce8..827ce224b 100644
--- a/utils/docs/man/man1/atomic-openshift-installer.1
+++ b/utils/docs/man/man1/atomic-openshift-installer.1
@@ -2,12 +2,12 @@
.\" Title: atomic-openshift-installer
.\" Author: [see the "AUTHOR" section]
.\" Generator: DocBook XSL Stylesheets v1.78.1 <http://docbook.sf.net/>
-.\" Date: 10/20/2016
+.\" Date: 12/28/2016
.\" Manual: atomic-openshift-installer
-.\" Source: atomic-openshift-utils 1.3
+.\" Source: atomic-openshift-utils 1.4
.\" Language: English
.\"
-.TH "ATOMIC\-OPENSHIFT\-I" "1" "10/20/2016" "atomic\-openshift\-utils 1\&.3" "atomic\-openshift\-installer"
+.TH "ATOMIC\-OPENSHIFT\-I" "1" "12/28/2016" "atomic\-openshift\-utils 1\&.4" "atomic\-openshift\-installer"
.\" -----------------------------------------------------------------
.\" * Define some portability stuff
.\" -----------------------------------------------------------------
@@ -86,7 +86,7 @@ Show the usage help and exit\&.
.RE
.SH "COMMANDS"
.sp
-\fBatomic\-openshift\-installer\fR has three modes of operation:
+\fBatomic\-openshift\-installer\fR has four modes of operation:
.sp
.RS 4
.ie n \{\
diff --git a/utils/docs/man/man1/atomic-openshift-installer.1.asciidoc.in b/utils/docs/man/man1/atomic-openshift-installer.1.asciidoc.in
index 9b02c4d14..2917e9992 100644
--- a/utils/docs/man/man1/atomic-openshift-installer.1.asciidoc.in
+++ b/utils/docs/man/man1/atomic-openshift-installer.1.asciidoc.in
@@ -68,7 +68,7 @@ Show the usage help and exit.
COMMANDS
--------
-**atomic-openshift-installer** has three modes of operation:
+**atomic-openshift-installer** has four modes of operation:
* **install**
* **uninstall**
diff --git a/utils/setup.cfg b/utils/setup.cfg
index ee3288fc5..862dffd7b 100644
--- a/utils/setup.cfg
+++ b/utils/setup.cfg
@@ -5,7 +5,6 @@
universal=1
[nosetests]
-tests=../,../roles/openshift_master_facts/test/,test/
verbosity=2
with-coverage=1
cover-html=1
@@ -17,5 +16,8 @@ cover-branches=1
[flake8]
max-line-length=120
-exclude=tests/*,setup.py
-ignore=E501,E121,E124
+exclude=test/*,setup.py,oo-installenv
+ignore=E501
+
+[lint]
+lint_disable=fixme,locally-disabled,file-ignored,duplicate-code
diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py
index 7e5ad4144..0bc9aa45e 100644
--- a/utils/src/ooinstall/cli_installer.py
+++ b/utils/src/ooinstall/cli_installer.py
@@ -501,7 +501,7 @@ def get_variant_and_version(multi_master=False):
i = 1
combos = get_variant_version_combos()
- for (variant, version) in combos:
+ for (variant, _) in combos:
message = "%s\n(%s) %s" % (message, i, variant.description)
i = i + 1
message = "%s\n" % message
@@ -1124,6 +1124,20 @@ def scaleup(ctx, gen_inventory):
click.echo('Welcome to the OpenShift Enterprise 3 Scaleup utility.')
+ # Scaleup requires manual data entry. Therefore, we do not support
+ # unattended operations.
+ if unattended:
+ msg = """
+---
+
+The 'scaleup' operation does not support unattended
+functionality. Re-run the installer without the '-u' or '--unattended'
+option to continue.
+"""
+ click.echo(msg)
+ sys.exit(1)
+
+ # Resume normal scaleup workflow
print_installation_summary(installed_hosts,
oo_cfg.settings['variant_version'],
verbose=False,)
diff --git a/utils/src/ooinstall/oo_config.py b/utils/src/ooinstall/oo_config.py
index 64eb340f3..cf14105af 100644
--- a/utils/src/ooinstall/oo_config.py
+++ b/utils/src/ooinstall/oo_config.py
@@ -1,5 +1,7 @@
# pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name,too-many-instance-attributes,too-few-public-methods
+from __future__ import (absolute_import, print_function)
+
import os
import sys
import logging
@@ -50,7 +52,7 @@ Error loading config. {}.
See https://docs.openshift.com/enterprise/latest/install_config/install/quick_install.html#defining-an-installation-configuration-file
for information on creating a configuration file or delete {} and re-run the installer.
"""
- print message.format(error, path)
+ print(message.format(error, path))
class OOConfigFileError(Exception):
@@ -103,7 +105,7 @@ class Host(object):
# If the property is defined (not None or False), export it:
if getattr(self, prop):
d[prop] = getattr(self, prop)
- for variable, value in self.other_variables.iteritems():
+ for variable, value in self.other_variables.items():
d[variable] = value
return d
@@ -256,16 +258,16 @@ class OOConfig(object):
# Parse the hosts into DTO objects:
for host in host_list:
host['other_variables'] = {}
- for variable, value in host.iteritems():
+ for variable, value in host.items():
if variable not in HOST_VARIABLES_BLACKLIST:
host['other_variables'][variable] = value
self.deployment.hosts.append(Host(**host))
# Parse the roles into Objects
- for name, variables in role_list.iteritems():
+ for name, variables in role_list.items():
self.deployment.roles.update({name: Role(name, variables)})
- except IOError, ferr:
+ except IOError as ferr:
raise OOConfigFileError('Cannot open config file "{}": {}'.format(ferr.filename,
ferr.strerror))
except yaml.scanner.ScannerError:
@@ -354,14 +356,13 @@ class OOConfig(object):
self.settings['ansible_inventory_path'] = \
'{}/hosts'.format(os.path.dirname(self.config_path))
- # pylint: disable=consider-iterating-dictionary
- # Disabled because we shouldn't alter the container we're
- # iterating over
- #
# clean up any empty sets
- for setting in self.settings.keys():
+ empty_keys = []
+ for setting in self.settings:
if not self.settings[setting]:
- self.settings.pop(setting)
+ empty_keys.append(setting)
+ for key in empty_keys:
+ self.settings.pop(key)
installer_log.debug("Updated OOConfig settings: %s", self.settings)
@@ -410,7 +411,7 @@ class OOConfig(object):
for host in self.deployment.hosts:
p_settings['deployment']['hosts'].append(host.to_dict())
- for name, role in self.deployment.roles.iteritems():
+ for name, role in self.deployment.roles.items():
p_settings['deployment']['roles'][name] = role.variables
for setting in self.deployment.variables:
@@ -424,7 +425,7 @@ class OOConfig(object):
if self.settings['ansible_inventory_directory'] != self._default_ansible_inv_dir():
p_settings['ansible_inventory_directory'] = self.settings['ansible_inventory_directory']
except KeyError as e:
- print "Error persisting settings: {}".format(e)
+ print("Error persisting settings: {}".format(e))
sys.exit(0)
return p_settings
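
A short sketch of why the two-pass cleanup above is needed: on Python 3, removing keys while iterating a dict raises a RuntimeError, so empty settings are collected first and popped afterwards (the settings dict is hypothetical):

```python
# The settings dict is hypothetical.
settings = {'ansible_ssh_user': 'root', 'ansible_config': '', 'unattended': None}

empty_keys = [key for key in settings if not settings[key]]
for key in empty_keys:
    settings.pop(key)

print(settings)   # -> {'ansible_ssh_user': 'root'}
```
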
diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py
index f542fb376..ce6e54664 100644
--- a/utils/src/ooinstall/openshift_ansible.py
+++ b/utils/src/ooinstall/openshift_ansible.py
@@ -1,5 +1,7 @@
# pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name,global-statement,global-variable-not-assigned
+from __future__ import (absolute_import, print_function)
+
import socket
import subprocess
import sys
@@ -107,12 +109,12 @@ def write_inventory_vars(base_inventory, lb):
global CFG
base_inventory.write('\n[OSEv3:vars]\n')
- for variable, value in CFG.settings.iteritems():
+ for variable, value in CFG.settings.items():
inventory_var = VARIABLES_MAP.get(variable, None)
if inventory_var and value:
base_inventory.write('{}={}\n'.format(inventory_var, value))
- for variable, value in CFG.deployment.variables.iteritems():
+ for variable, value in CFG.deployment.variables.items():
inventory_var = VARIABLES_MAP.get(variable, variable)
if value:
base_inventory.write('{}={}\n'.format(inventory_var, value))
@@ -152,11 +154,11 @@ def write_inventory_vars(base_inventory, lb):
"'baseurl': '{}', "
"'enabled': 1, 'gpgcheck': 0}}]\n".format(os.environ['OO_INSTALL_PUDDLE_REPO']))
- for name, role_obj in CFG.deployment.roles.iteritems():
+ for name, role_obj in CFG.deployment.roles.items():
if role_obj.variables:
group_name = ROLES_TO_GROUPS_MAP.get(name, name)
base_inventory.write("\n[{}:vars]\n".format(group_name))
- for variable, value in role_obj.variables.iteritems():
+ for variable, value in role_obj.variables.items():
inventory_var = VARIABLES_MAP.get(variable, variable)
if value:
base_inventory.write('{}={}\n'.format(inventory_var, value))
@@ -193,7 +195,7 @@ def write_host(host, role, inventory, schedulable=None):
facts += ' {}={}'.format(HOST_VARIABLES_MAP.get(prop), getattr(host, prop))
if host.other_variables:
- for variable, value in host.other_variables.iteritems():
+ for variable, value in host.other_variables.items():
facts += " {}={}".format(variable, value)
if host.node_labels and role == 'node':
@@ -210,9 +212,9 @@ def write_host(host, role, inventory, schedulable=None):
if installer_host in [host.connect_to, host.hostname, host.public_hostname]:
facts += ' ansible_connection=local'
if os.geteuid() != 0:
- no_pwd_sudo = subprocess.call(['sudo', '-n', 'echo', 'openshift'])
+ no_pwd_sudo = subprocess.call(['sudo', '-n', 'echo', '-n'])
if no_pwd_sudo == 1:
- print 'The atomic-openshift-installer requires sudo access without a password.'
+ print('The atomic-openshift-installer requires sudo access without a password.')
sys.exit(1)
facts += ' ansible_become=yes'
@@ -245,9 +247,9 @@ def load_system_facts(inventory_file, os_facts_path, env_vars, verbose=False):
installer_log.debug("Going to try to read this file: %s", CFG.settings['ansible_callback_facts_yaml'])
try:
callback_facts = yaml.safe_load(callback_facts_file)
- except yaml.YAMLError, exc:
- print "Error in {}".format(CFG.settings['ansible_callback_facts_yaml']), exc
- print "Try deleting and rerunning the atomic-openshift-installer"
+ except yaml.YAMLError as exc:
+ print("Error in {}".format(CFG.settings['ansible_callback_facts_yaml']), exc)
+ print("Try deleting and rerunning the atomic-openshift-installer")
sys.exit(1)
return callback_facts, 0
diff --git a/utils/src/ooinstall/variants.py b/utils/src/ooinstall/variants.py
index 39772bb2e..a45be98bf 100644
--- a/utils/src/ooinstall/variants.py
+++ b/utils/src/ooinstall/variants.py
@@ -38,32 +38,24 @@ class Variant(object):
# WARNING: Keep the versions ordered, most recent first:
-OSE = Variant('openshift-enterprise', 'OpenShift Container Platform',
- [
- Version('3.4', 'openshift-enterprise'),
- ]
-)
-
-REG = Variant('openshift-enterprise', 'Registry',
- [
- Version('3.4', 'openshift-enterprise', 'registry'),
- ]
-)
-
-origin = Variant('origin', 'OpenShift Origin',
- [
- Version('1.4', 'origin'),
- ]
-)
-
-LEGACY = Variant('openshift-enterprise', 'OpenShift Container Platform',
- [
- Version('3.3', 'openshift-enterprise'),
- Version('3.2', 'openshift-enterprise'),
- Version('3.1', 'openshift-enterprise'),
- Version('3.0', 'openshift-enterprise'),
- ]
-)
+OSE = Variant('openshift-enterprise', 'OpenShift Container Platform', [
+ Version('3.4', 'openshift-enterprise'),
+])
+
+REG = Variant('openshift-enterprise', 'Registry', [
+ Version('3.4', 'openshift-enterprise', 'registry'),
+])
+
+origin = Variant('origin', 'OpenShift Origin', [
+ Version('1.4', 'origin'),
+])
+
+LEGACY = Variant('openshift-enterprise', 'OpenShift Container Platform', [
+ Version('3.3', 'openshift-enterprise'),
+ Version('3.2', 'openshift-enterprise'),
+ Version('3.1', 'openshift-enterprise'),
+ Version('3.0', 'openshift-enterprise'),
+])
# Ordered list of variants we can install, first is the default.
SUPPORTED_VARIANTS = (OSE, REG, origin, LEGACY)
diff --git a/utils/test-requirements.txt b/utils/test-requirements.txt
index b70a03563..aebfe7c39 100644
--- a/utils/test-requirements.txt
+++ b/utils/test-requirements.txt
@@ -1,7 +1,7 @@
ansible
-enum
configparser
pylint
+setuptools-lint
nose
coverage
mock
@@ -11,3 +11,7 @@ click
backports.functools_lru_cache
pyOpenSSL
yamllint
+tox
+detox
+# Temporary work-around for flake8 vs mccabe version conflict
+mccabe==0.5.3
diff --git a/utils/test/cli_installer_tests.py b/utils/test/cli_installer_tests.py
index 36dc18034..0cb37eaff 100644
--- a/utils/test/cli_installer_tests.py
+++ b/utils/test/cli_installer_tests.py
@@ -4,7 +4,8 @@
import copy
import os
-import ConfigParser
+
+from six.moves import configparser
import ooinstall.cli_installer as cli
@@ -408,7 +409,7 @@ class UnattendedCliTests(OOCliFixture):
result = self.runner.invoke(cli.cli, self.cli_args)
if result.exception is None or result.exit_code != 1:
- print "Exit code: %s" % result.exit_code
+ print("Exit code: %s" % result.exit_code)
self.fail("Unexpected CLI return")
# unattended with config file and all installed hosts (with --force)
@@ -523,7 +524,7 @@ class UnattendedCliTests(OOCliFixture):
self.assert_result(result, 0)
# Check the inventory file looks as we would expect:
- inventory = ConfigParser.ConfigParser(allow_no_value=True)
+ inventory = configparser.ConfigParser(allow_no_value=True)
inventory.read(os.path.join(self.work_dir, 'hosts'))
self.assertEquals('root',
inventory.get('OSEv3:vars', 'ansible_ssh_user'))
@@ -566,7 +567,7 @@ class UnattendedCliTests(OOCliFixture):
self.assertEquals('3.3', written_config['variant_version'])
# Make sure the correct value was passed to ansible:
- inventory = ConfigParser.ConfigParser(allow_no_value=True)
+ inventory = configparser.ConfigParser(allow_no_value=True)
inventory.read(os.path.join(self.work_dir, 'hosts'))
self.assertEquals('openshift-enterprise',
inventory.get('OSEv3:vars', 'deployment_type'))
@@ -594,7 +595,7 @@ class UnattendedCliTests(OOCliFixture):
# and written to disk:
self.assertEquals('3.3', written_config['variant_version'])
- inventory = ConfigParser.ConfigParser(allow_no_value=True)
+ inventory = configparser.ConfigParser(allow_no_value=True)
inventory.read(os.path.join(self.work_dir, 'hosts'))
self.assertEquals('openshift-enterprise',
inventory.get('OSEv3:vars', 'deployment_type'))
@@ -830,7 +831,7 @@ class AttendedCliTests(OOCliFixture):
written_config = read_yaml(self.config_file)
self._verify_config_hosts(written_config, 4)
- inventory = ConfigParser.ConfigParser(allow_no_value=True)
+ inventory = configparser.ConfigParser(allow_no_value=True)
inventory.read(os.path.join(self.work_dir, 'hosts'))
self.assert_inventory_host_var(inventory, 'nodes', '10.0.0.1',
'openshift_schedulable=False')
@@ -949,7 +950,7 @@ class AttendedCliTests(OOCliFixture):
written_config = read_yaml(self.config_file)
self._verify_config_hosts(written_config, 6)
- inventory = ConfigParser.ConfigParser(allow_no_value=True)
+ inventory = configparser.ConfigParser(allow_no_value=True)
inventory.read(os.path.join(self.work_dir, 'hosts'))
self.assert_inventory_host_var(inventory, 'nodes', '10.0.0.1',
'openshift_schedulable=False')
@@ -990,7 +991,7 @@ class AttendedCliTests(OOCliFixture):
written_config = read_yaml(self.config_file)
self._verify_config_hosts(written_config, 5)
- inventory = ConfigParser.ConfigParser(allow_no_value=True)
+ inventory = configparser.ConfigParser(allow_no_value=True)
inventory.read(os.path.join(self.work_dir, 'hosts'))
self.assert_inventory_host_var(inventory, 'nodes', '10.0.0.1',
'openshift_schedulable=True')
@@ -1082,7 +1083,7 @@ class AttendedCliTests(OOCliFixture):
written_config = read_yaml(self.config_file)
self._verify_config_hosts(written_config, 1)
- inventory = ConfigParser.ConfigParser(allow_no_value=True)
+ inventory = configparser.ConfigParser(allow_no_value=True)
inventory.read(os.path.join(self.work_dir, 'hosts'))
self.assert_inventory_host_var(inventory, 'nodes', '10.0.0.1',
'openshift_schedulable=True')
@@ -1116,7 +1117,7 @@ class AttendedCliTests(OOCliFixture):
written_config = read_yaml(self.config_file)
self._verify_config_hosts(written_config, 4)
- inventory = ConfigParser.ConfigParser(allow_no_value=True)
+ inventory = configparser.ConfigParser(allow_no_value=True)
inventory.read(os.path.join(self.work_dir, 'hosts'))
self.assert_inventory_host_var(inventory, 'nodes', '10.0.0.1',
'openshift_schedulable=False')
diff --git a/utils/test/fixture.py b/utils/test/fixture.py
index 62135c761..5200d275d 100644
--- a/utils/test/fixture.py
+++ b/utils/test/fixture.py
@@ -65,13 +65,13 @@ class OOCliFixture(OOInstallFixture):
def assert_result(self, result, exit_code):
if result.exit_code != exit_code:
- print "Unexpected result from CLI execution"
- print "Exit code: %s" % result.exit_code
- print "Exception: %s" % result.exception
- print result.exc_info
+ print("Unexpected result from CLI execution")
+ print("Exit code: %s" % result.exit_code)
+ print("Exception: %s" % result.exception)
+ print(result.exc_info)
import traceback
traceback.print_exception(*result.exc_info)
- print "Output:\n%s" % result.output
+ print("Output:\n%s" % result.output)
self.fail("Exception during CLI execution")
def _verify_load_facts(self, load_facts_mock):
diff --git a/utils/test/oo_config_tests.py b/utils/test/oo_config_tests.py
index 56fd82408..2b4fce512 100644
--- a/utils/test/oo_config_tests.py
+++ b/utils/test/oo_config_tests.py
@@ -2,13 +2,14 @@
# repo. We will work on these over time.
# pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name
-import cStringIO
import os
import unittest
import tempfile
import shutil
import yaml
+from six.moves import cStringIO
+
from ooinstall.oo_config import OOConfig, Host, OOConfigInvalidHostError
import ooinstall.openshift_ansible
@@ -244,7 +245,7 @@ class HostTests(OOInstallFixture):
}
new_node = Host(**yaml_props)
- inventory = cStringIO.StringIO()
+ inventory = cStringIO()
# This is what the 'write_host' function generates. write_host
# has no return value, it just writes directly to the file
# 'inventory' which in this test-case is a StringIO object
@@ -285,7 +286,7 @@ class HostTests(OOInstallFixture):
# }
# new_node = Host(**yaml_props)
- # inventory = cStringIO.StringIO()
+ # inventory = cStringIO()
# # This is what the original 'write_host' function will
# # generate. write_host has no return value, it just writes
diff --git a/utils/test/openshift_ansible_tests.py b/utils/test/openshift_ansible_tests.py
new file mode 100644
index 000000000..5847fe37b
--- /dev/null
+++ b/utils/test/openshift_ansible_tests.py
@@ -0,0 +1,71 @@
+import os
+import unittest
+import tempfile
+import shutil
+import yaml
+
+from six.moves import configparser
+
+from ooinstall import openshift_ansible
+from ooinstall.oo_config import Host, OOConfig
+
+
+BASE_CONFIG = """
+---
+variant: openshift-enterprise
+variant_version: 3.3
+version: v2
+deployment:
+ ansible_ssh_user: cloud-user
+ hosts: []
+ roles:
+ master:
+ node:
+"""
+
+
+class TestOpenShiftAnsible(unittest.TestCase):
+
+ def setUp(self):
+ self.tempfiles = []
+ self.work_dir = tempfile.mkdtemp(prefix='openshift_ansible_tests')
+ self.configfile = os.path.join(self.work_dir, 'ooinstall.config')
+ with open(self.configfile, 'w') as config_file:
+ config_file.write(BASE_CONFIG)
+ self.inventory = os.path.join(self.work_dir, 'hosts')
+ config = OOConfig(self.configfile)
+ config.settings['ansible_inventory_path'] = self.inventory
+ openshift_ansible.set_config(config)
+
+ def tearDown(self):
+ shutil.rmtree(self.work_dir)
+
+ def generate_hosts(self, num_hosts, name_prefix, roles=None, new_host=False):
+ hosts = []
+ for num in range(1, num_hosts + 1):
+ hosts.append(Host(connect_to=name_prefix + str(num),
+ roles=roles, new_host=new_host))
+ return hosts
+
+ def test_generate_inventory_new_nodes(self):
+ hosts = self.generate_hosts(1, 'master', roles=(['master', 'etcd']))
+ hosts.extend(self.generate_hosts(1, 'node', roles=['node']))
+ hosts.extend(self.generate_hosts(1, 'new_node', roles=['node'], new_host=True))
+ openshift_ansible.generate_inventory(hosts)
+ inventory = configparser.ConfigParser(allow_no_value=True)
+ inventory.read(self.inventory)
+ self.assertTrue(inventory.has_section('new_nodes'))
+ self.assertTrue(inventory.has_option('new_nodes', 'new_node1'))
+
+ def test_write_inventory_vars_role_vars(self):
+ with open(self.inventory, 'w') as inv:
+            openshift_ansible.CFG.deployment.roles['master'].variables = {'color': 'blue'}
+            openshift_ansible.CFG.deployment.roles['node'].variables = {'color': 'green'}
+ openshift_ansible.write_inventory_vars(inv, None)
+
+ inventory = configparser.ConfigParser(allow_no_value=True)
+ inventory.read(self.inventory)
+ self.assertTrue(inventory.has_section('masters:vars'))
+ self.assertEquals('blue', inventory.get('masters:vars', 'color'))
+ self.assertTrue(inventory.has_section('nodes:vars'))
+ self.assertEquals('green', inventory.get('nodes:vars', 'color'))
diff --git a/utils/test/test_utils.py b/utils/test/test_utils.py
index 2e59d86f2..cbce64f7e 100644
--- a/utils/test/test_utils.py
+++ b/utils/test/test_utils.py
@@ -2,6 +2,7 @@
Unittests for ooinstall utils.
"""
+import six
import unittest
import logging
import sys
@@ -28,9 +29,6 @@ class TestUtils(unittest.TestCase):
mock.call('OO_FOO: bar'),
]
- # python 2.x has assertItemsEqual, python 3.x has assertCountEqual
- if sys.version_info.major > 3:
- self.assertItemsEqual = self.assertCountEqual
######################################################################
# Validate ooinstall.utils.debug_env functionality
@@ -40,7 +38,6 @@ class TestUtils(unittest.TestCase):
with mock.patch('ooinstall.utils.installer_log') as _il:
debug_env(self.debug_all_params)
- print _il.debug.call_args_list
# Debug was called for each item we expect
self.assertEqual(
@@ -48,7 +45,8 @@ class TestUtils(unittest.TestCase):
_il.debug.call_count)
# Each item we expect was logged
- self.assertItemsEqual(
+ six.assertCountEqual(
+ self,
self.expected,
_il.debug.call_args_list)
@@ -67,7 +65,8 @@ class TestUtils(unittest.TestCase):
_il.debug.call_count,
len(debug_some_params))
- self.assertItemsEqual(
+ six.assertCountEqual(
+ self,
self.expected,
_il.debug.call_args_list)
diff --git a/utils/tox.ini b/utils/tox.ini
new file mode 100644
index 000000000..1308f7505
--- /dev/null
+++ b/utils/tox.ini
@@ -0,0 +1,16 @@
+[tox]
+minversion=2.3.1
+envlist =
+ py{27,35}-{flake8,unit,pylint}
+skipsdist=True
+skip_missing_interpreters=True
+
+[testenv]
+usedevelop=True
+deps =
+ -rtest-requirements.txt
+ py35-flake8: flake8-bugbear
+commands =
+ flake8: python setup.py flake8
+ unit: python setup.py nosetests
+ pylint: python setup.py lint