-rw-r--r-- .tito/packages/openshift-ansible | 2
-rw-r--r-- .tito/packages/openshift-ansible-bin | 1
-rw-r--r-- .tito/packages/openshift-ansible-inventory | 1
-rw-r--r-- .tito/releasers.conf | 4
-rw-r--r-- README.md | 2
-rw-r--r-- README_AWS.md | 15
-rw-r--r-- README_GCE.md | 8
-rw-r--r-- README_origin.md | 15
-rw-r--r-- README_vagrant.md | 4
-rw-r--r-- Vagrantfile | 21
-rwxr-xr-x bin/cluster | 57
-rw-r--r-- filter_plugins/oo_filters.py | 95
-rw-r--r-- filter_plugins/openshift_master.py | 469
-rwxr-xr-x git/pylint.sh | 2
-rw-r--r-- inventory/aws/hosts/ec2.ini | 97
-rwxr-xr-x inventory/aws/hosts/ec2.py | 645
-rw-r--r-- inventory/byo/hosts.example | 30
-rwxr-xr-x inventory/multi_inventory.py | 34
-rw-r--r-- openshift-ansible.spec | 157
-rw-r--r-- playbooks/adhoc/bootstrap-fedora.yml | 5
-rw-r--r-- playbooks/adhoc/uninstall.yml | 66
-rw-r--r-- playbooks/aws/openshift-cluster/addNodes.yml | 39
-rw-r--r-- playbooks/aws/openshift-cluster/scaleup.yml | 34
-rw-r--r-- playbooks/aws/openshift-cluster/tasks/launch_instances.yml | 35
-rw-r--r-- playbooks/aws/openshift-cluster/templates/user_data.j2 | 11
-rw-r--r-- playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml | 33
-rw-r--r-- playbooks/byo/openshift-cluster/scaleup.yml | 10
-rw-r--r-- playbooks/byo/openshift_facts.yml | 3
-rw-r--r-- playbooks/common/openshift-cluster/config.yml | 3
-rw-r--r-- playbooks/common/openshift-cluster/evaluate_groups.yml | 13
-rw-r--r-- playbooks/common/openshift-cluster/scaleup.yml | 8
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check | 16
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/files/versions.sh | 4
-rwxr-xr-x playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py | 4
-rw-r--r-- playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml | 138
-rw-r--r-- playbooks/common/openshift-etcd/config.yml | 2
-rw-r--r-- playbooks/common/openshift-master/config.yml | 83
-rw-r--r-- playbooks/common/openshift-node/config.yml | 4
-rw-r--r-- playbooks/gce/openshift-cluster/launch.yml | 4
-rw-r--r-- playbooks/gce/openshift-cluster/tasks/launch_instances.yml | 4
-rw-r--r-- playbooks/gce/openshift-cluster/vars.yml | 3
-rw-r--r-- playbooks/libvirt/openshift-cluster/list.yml | 8
-rw-r--r-- playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml | 2
-rw-r--r-- playbooks/libvirt/openshift-cluster/templates/domain.xml | 1
-rw-r--r-- playbooks/openstack/openshift-cluster/list.yml | 8
-rw-r--r-- rel-eng/packages/.readme | 3
-rw-r--r-- rel-eng/packages/openshift-ansible-bin | 1
-rw-r--r-- rel-eng/packages/openshift-ansible-inventory | 1
-rw-r--r-- rel-eng/tito.props | 5
-rw-r--r-- roles/ansible/tasks/main.yml | 7
-rw-r--r-- roles/cockpit/tasks/main.yml | 12
-rw-r--r-- roles/copr_cli/README.md | 38
-rw-r--r-- roles/copr_cli/defaults/main.yml | 2
-rw-r--r-- roles/copr_cli/handlers/main.yml | 2
-rw-r--r-- roles/copr_cli/meta/main.yml | 14
-rw-r--r-- roles/copr_cli/tasks/main.yml | 10
-rw-r--r-- roles/copr_cli/vars/main.yml | 2
-rw-r--r-- roles/docker/README.md | 18
-rw-r--r-- roles/docker/handlers/main.yml | 5
-rw-r--r-- roles/docker/meta/main.yml | 128
-rw-r--r-- roles/docker/tasks/main.yml | 7
-rw-r--r-- roles/docker/tasks/udev_workaround.yml | 30
-rw-r--r-- roles/docker/vars/main.yml | 3
-rw-r--r-- roles/etcd/README.md | 2
-rw-r--r-- roles/etcd/tasks/main.yml | 5
-rw-r--r-- roles/flannel/README.md | 3
-rw-r--r-- roles/flannel/tasks/main.yml | 6
-rw-r--r-- roles/fluentd_master/tasks/main.yml | 7
-rw-r--r-- roles/fluentd_node/tasks/main.yml | 7
-rw-r--r-- roles/haproxy/tasks/main.yml | 7
-rw-r--r-- roles/kube_nfs_volumes/tasks/main.yml | 5
-rw-r--r-- roles/kube_nfs_volumes/tasks/nfs.yml | 5
-rw-r--r-- roles/lib_zabbix/library/zbx_action.py | 7
-rw-r--r-- roles/lib_zabbix/library/zbx_graph.py | 331
-rw-r--r-- roles/lib_zabbix/library/zbx_graphprototype.py | 331
-rw-r--r-- roles/lib_zabbix/library/zbx_httptest.py | 290
-rw-r--r-- roles/lib_zabbix/library/zbx_item.py | 20
-rw-r--r-- roles/lib_zabbix/library/zbx_itemprototype.py | 20
-rw-r--r-- roles/lib_zabbix/library/zbx_usergroup.py | 64
-rw-r--r-- roles/lib_zabbix/tasks/create_template.yml | 26
-rw-r--r-- roles/openshift_ansible_inventory/tasks/main.yml | 10
-rw-r--r-- roles/openshift_cluster_metrics/tasks/main.yml | 6
-rw-r--r-- roles/openshift_common/tasks/main.yml | 11
-rw-r--r-- roles/openshift_examples/defaults/main.yml | 4
-rwxr-xr-x roles/openshift_examples/examples-sync.sh | 7
-rw-r--r-- roles/openshift_examples/files/examples/README.md | 7
-rw-r--r-- roles/openshift_examples/files/examples/v1.0/db-templates/mongodb-ephemeral-template.json (renamed from roles/openshift_examples/files/examples/db-templates/mongodb-ephemeral-template.json) | 0
-rw-r--r-- roles/openshift_examples/files/examples/v1.0/db-templates/mongodb-persistent-template.json (renamed from roles/openshift_examples/files/examples/db-templates/mongodb-persistent-template.json) | 0
-rw-r--r-- roles/openshift_examples/files/examples/v1.0/db-templates/mysql-ephemeral-template.json (renamed from roles/openshift_examples/files/examples/db-templates/mysql-ephemeral-template.json) | 0
-rw-r--r-- roles/openshift_examples/files/examples/v1.0/db-templates/mysql-persistent-template.json (renamed from roles/openshift_examples/files/examples/db-templates/mysql-persistent-template.json) | 0
-rw-r--r-- roles/openshift_examples/files/examples/v1.0/db-templates/postgresql-ephemeral-template.json (renamed from roles/openshift_examples/files/examples/db-templates/postgresql-ephemeral-template.json) | 0
-rw-r--r-- roles/openshift_examples/files/examples/v1.0/db-templates/postgresql-persistent-template.json (renamed from roles/openshift_examples/files/examples/db-templates/postgresql-persistent-template.json) | 0
-rw-r--r-- roles/openshift_examples/files/examples/v1.0/image-streams/image-streams-centos7-v1-0.json | 285
-rw-r--r-- roles/openshift_examples/files/examples/v1.0/image-streams/image-streams-centos7.json (renamed from roles/openshift_examples/files/examples/image-streams/image-streams-centos7.json) | 0
-rw-r--r-- roles/openshift_examples/files/examples/v1.0/image-streams/image-streams-rhel7-v1-0.json | 254
-rw-r--r-- roles/openshift_examples/files/examples/v1.0/image-streams/image-streams-rhel7.json (renamed from roles/openshift_examples/files/examples/image-streams/image-streams-rhel7.json) | 0
-rw-r--r-- roles/openshift_examples/files/examples/v1.0/infrastructure-templates/enterprise/logging-deployer.yaml (renamed from roles/openshift_examples/files/examples/infrastructure-templates/enterprise/logging-deployer.yaml) | 0
-rw-r--r-- roles/openshift_examples/files/examples/v1.0/infrastructure-templates/enterprise/metrics-deployer.yaml | 116
-rw-r--r-- roles/openshift_examples/files/examples/v1.0/infrastructure-templates/origin/logging-deployer.yaml (renamed from roles/openshift_examples/files/examples/infrastructure-templates/origin/logging-deployer.yaml) | 0
-rw-r--r-- roles/openshift_examples/files/examples/v1.0/infrastructure-templates/origin/metrics-deployer.yaml (renamed from roles/openshift_examples/files/examples/infrastructure-templates/enterprise/metrics-deployer.yaml) | 4
-rw-r--r-- roles/openshift_examples/files/examples/v1.0/quickstart-templates/cakephp-mysql.json (renamed from roles/openshift_examples/files/examples/quickstart-templates/cakephp-mysql.json) | 0
-rw-r--r-- roles/openshift_examples/files/examples/v1.0/quickstart-templates/cakephp.json (renamed from roles/openshift_examples/files/examples/quickstart-templates/cakephp.json) | 0
-rw-r--r-- roles/openshift_examples/files/examples/v1.0/quickstart-templates/dancer-mysql.json (renamed from roles/openshift_examples/files/examples/quickstart-templates/dancer-mysql.json) | 0
-rw-r--r-- roles/openshift_examples/files/examples/v1.0/quickstart-templates/dancer.json (renamed from roles/openshift_examples/files/examples/quickstart-templates/dancer.json) | 0
-rw-r--r-- roles/openshift_examples/files/examples/v1.0/quickstart-templates/django-postgresql.json (renamed from roles/openshift_examples/files/examples/quickstart-templates/django-postgresql.json) | 0
-rw-r--r-- roles/openshift_examples/files/examples/v1.0/quickstart-templates/django.json (renamed from roles/openshift_examples/files/examples/quickstart-templates/django.json) | 0
-rw-r--r-- roles/openshift_examples/files/examples/v1.0/quickstart-templates/jenkins-ephemeral-template.json (renamed from roles/openshift_examples/files/examples/quickstart-templates/jenkins-ephemeral-template.json) | 0
-rw-r--r-- roles/openshift_examples/files/examples/v1.0/quickstart-templates/jenkins-persistent-template.json (renamed from roles/openshift_examples/files/examples/quickstart-templates/jenkins-persistent-template.json) | 0
-rw-r--r-- roles/openshift_examples/files/examples/v1.0/quickstart-templates/nodejs-mongodb.json (renamed from roles/openshift_examples/files/examples/quickstart-templates/nodejs-mongodb.json) | 0
-rw-r--r-- roles/openshift_examples/files/examples/v1.0/quickstart-templates/nodejs.json (renamed from roles/openshift_examples/files/examples/quickstart-templates/nodejs.json) | 0
-rw-r--r-- roles/openshift_examples/files/examples/v1.0/quickstart-templates/rails-postgresql.json (renamed from roles/openshift_examples/files/examples/quickstart-templates/rails-postgresql.json) | 0
-rw-r--r-- roles/openshift_examples/files/examples/v1.0/xpaas-streams/jboss-image-streams.json (renamed from roles/openshift_examples/files/examples/xpaas-streams/jboss-image-streams.json) | 0
-rw-r--r-- roles/openshift_examples/files/examples/v1.0/xpaas-templates/amq62-basic.json (renamed from roles/openshift_examples/files/examples/xpaas-templates/amq62-basic.json) | 0
-rw-r--r-- roles/openshift_examples/files/examples/v1.0/xpaas-templates/amq62-persistent-ssl.json (renamed from roles/openshift_examples/files/examples/xpaas-templates/amq62-persistent-ssl.json) | 0
-rw-r--r-- roles/openshift_examples/files/examples/v1.0/xpaas-templates/amq62-persistent.json (renamed from roles/openshift_examples/files/examples/xpaas-templates/amq62-persistent.json) | 0
-rw-r--r-- roles/openshift_examples/files/examples/v1.0/xpaas-templates/amq62-ssl.json (renamed from roles/openshift_examples/files/examples/xpaas-templates/amq62-ssl.json) | 0
-rw-r--r-- roles/openshift_examples/files/examples/v1.0/xpaas-templates/eap64-amq-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/xpaas-templates/eap64-amq-persistent-s2i.json) | 0
-rw-r--r-- roles/openshift_examples/files/examples/v1.0/xpaas-templates/eap64-amq-s2i.json (renamed from roles/openshift_examples/files/examples/xpaas-templates/eap64-amq-s2i.json) | 0
-rw-r--r-- roles/openshift_examples/files/examples/v1.0/xpaas-templates/eap64-basic-s2i.json (renamed from roles/openshift_examples/files/examples/xpaas-templates/eap64-basic-s2i.json) | 0
-rw-r--r-- roles/openshift_examples/files/examples/v1.0/xpaas-templates/eap64-https-s2i.json (renamed from roles/openshift_examples/files/examples/xpaas-templates/eap64-https-s2i.json) | 0
-rw-r--r-- roles/openshift_examples/files/examples/v1.0/xpaas-templates/eap64-mongodb-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/xpaas-templates/eap64-mongodb-persistent-s2i.json) | 0
-rw-r--r-- roles/openshift_examples/files/examples/v1.0/xpaas-templates/eap64-mongodb-s2i.json (renamed from roles/openshift_examples/files/examples/xpaas-templates/eap64-mongodb-s2i.json) | 0
-rw-r--r-- roles/openshift_examples/files/examples/v1.0/xpaas-templates/eap64-mysql-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/xpaas-templates/eap64-mysql-persistent-s2i.json) | 0
-rw-r--r-- roles/openshift_examples/files/examples/v1.0/xpaas-templates/eap64-mysql-s2i.json (renamed from roles/openshift_examples/files/examples/xpaas-templates/eap64-mysql-s2i.json) | 0
-rw-r--r-- roles/openshift_examples/files/examples/v1.0/xpaas-templates/eap64-postgresql-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/xpaas-templates/eap64-postgresql-persistent-s2i.json) | 0
-rw-r--r-- roles/openshift_examples/files/examples/v1.0/xpaas-templates/eap64-postgresql-s2i.json (renamed from roles/openshift_examples/files/examples/xpaas-templates/eap64-postgresql-s2i.json) | 0
-rw-r--r-- roles/openshift_examples/files/examples/v1.0/xpaas-templates/jws30-tomcat7-basic-s2i.json (renamed from roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-basic-s2i.json) | 0
-rw-r--r-- roles/openshift_examples/files/examples/v1.0/xpaas-templates/jws30-tomcat7-https-s2i.json (renamed from roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-https-s2i.json) | 0
-rw-r--r-- roles/openshift_examples/files/examples/v1.0/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json) | 0
-rw-r--r-- roles/openshift_examples/files/examples/v1.0/xpaas-templates/jws30-tomcat7-mongodb-s2i.json (renamed from roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-mongodb-s2i.json) | 0
-rw-r--r-- roles/openshift_examples/files/examples/v1.0/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json) | 0
-rw-r--r-- roles/openshift_examples/files/examples/v1.0/xpaas-templates/jws30-tomcat7-mysql-s2i.json (renamed from roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-mysql-s2i.json) | 0
-rw-r--r-- roles/openshift_examples/files/examples/v1.0/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json) | 0
-rw-r--r-- roles/openshift_examples/files/examples/v1.0/xpaas-templates/jws30-tomcat7-postgresql-s2i.json (renamed from roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-postgresql-s2i.json) | 0
-rw-r--r-- roles/openshift_examples/files/examples/v1.0/xpaas-templates/jws30-tomcat8-basic-s2i.json (renamed from roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-basic-s2i.json) | 0
-rw-r--r-- roles/openshift_examples/files/examples/v1.0/xpaas-templates/jws30-tomcat8-https-s2i.json (renamed from roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-https-s2i.json) | 0
-rw-r--r-- roles/openshift_examples/files/examples/v1.0/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json) | 0
-rw-r--r-- roles/openshift_examples/files/examples/v1.0/xpaas-templates/jws30-tomcat8-mongodb-s2i.json (renamed from roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-mongodb-s2i.json) | 0
-rw-r--r-- roles/openshift_examples/files/examples/v1.0/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json) | 0
-rw-r--r-- roles/openshift_examples/files/examples/v1.0/xpaas-templates/jws30-tomcat8-mysql-s2i.json (renamed from roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-mysql-s2i.json) | 0
-rw-r--r-- roles/openshift_examples/files/examples/v1.0/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json (renamed from roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json) | 0
-rw-r--r-- roles/openshift_examples/files/examples/v1.0/xpaas-templates/jws30-tomcat8-postgresql-s2i.json (renamed from roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-postgresql-s2i.json) | 0
-rw-r--r-- roles/openshift_examples/files/examples/v1.1/db-templates/mongodb-ephemeral-template.json | 184
-rw-r--r-- roles/openshift_examples/files/examples/v1.1/db-templates/mongodb-persistent-template.json | 207
-rw-r--r-- roles/openshift_examples/files/examples/v1.1/db-templates/mysql-ephemeral-template.json | 173
-rw-r--r-- roles/openshift_examples/files/examples/v1.1/db-templates/mysql-persistent-template.json | 196
-rw-r--r-- roles/openshift_examples/files/examples/v1.1/db-templates/postgresql-ephemeral-template.json | 173
-rw-r--r-- roles/openshift_examples/files/examples/v1.1/db-templates/postgresql-persistent-template.json | 196
-rw-r--r-- roles/openshift_examples/files/examples/v1.1/image-streams/image-streams-centos7.json | 412
-rw-r--r-- roles/openshift_examples/files/examples/v1.1/image-streams/image-streams-rhel7.json | 378
-rw-r--r-- roles/openshift_examples/files/examples/v1.1/infrastructure-templates/enterprise/logging-deployer.yaml | 151
-rw-r--r-- roles/openshift_examples/files/examples/v1.1/infrastructure-templates/enterprise/metrics-deployer.yaml | 116
-rw-r--r-- roles/openshift_examples/files/examples/v1.1/infrastructure-templates/origin/logging-deployer.yaml | 151
-rw-r--r-- roles/openshift_examples/files/examples/v1.1/infrastructure-templates/origin/metrics-deployer.yaml (renamed from roles/openshift_examples/files/examples/infrastructure-templates/origin/metrics-deployer.yaml) | 4
-rw-r--r-- roles/openshift_examples/files/examples/v1.1/quickstart-templates/cakephp-mysql.json | 378
-rw-r--r-- roles/openshift_examples/files/examples/v1.1/quickstart-templates/cakephp.json | 275
-rw-r--r-- roles/openshift_examples/files/examples/v1.1/quickstart-templates/dancer-mysql.json | 348
-rw-r--r-- roles/openshift_examples/files/examples/v1.1/quickstart-templates/dancer.json | 211
-rw-r--r-- roles/openshift_examples/files/examples/v1.1/quickstart-templates/django-postgresql.json | 346
-rw-r--r-- roles/openshift_examples/files/examples/v1.1/quickstart-templates/django.json | 254
-rw-r--r-- roles/openshift_examples/files/examples/v1.1/quickstart-templates/jenkins-ephemeral-template.json | 150
-rw-r--r-- roles/openshift_examples/files/examples/v1.1/quickstart-templates/jenkins-persistent-template.json | 173
-rw-r--r-- roles/openshift_examples/files/examples/v1.1/quickstart-templates/nodejs-mongodb.json | 346
-rw-r--r-- roles/openshift_examples/files/examples/v1.1/quickstart-templates/nodejs.json | 248
-rw-r--r-- roles/openshift_examples/files/examples/v1.1/quickstart-templates/rails-postgresql.json | 402
-rw-r--r-- roles/openshift_examples/files/examples/v1.1/xpaas-streams/jboss-image-streams.json | 107
-rw-r--r-- roles/openshift_examples/files/examples/v1.1/xpaas-templates/amq62-basic.json | 325
-rw-r--r-- roles/openshift_examples/files/examples/v1.1/xpaas-templates/amq62-persistent-ssl.json | 521
-rw-r--r-- roles/openshift_examples/files/examples/v1.1/xpaas-templates/amq62-persistent.json | 343
-rw-r--r-- roles/openshift_examples/files/examples/v1.1/xpaas-templates/amq62-ssl.json | 507
-rw-r--r-- roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-amq-persistent-s2i.json | 659
-rw-r--r-- roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-amq-s2i.json | 619
-rw-r--r-- roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-basic-s2i.json | 305
-rw-r--r-- roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-https-s2i.json | 413
-rw-r--r-- roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-mongodb-persistent-s2i.json | 669
-rw-r--r-- roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-mongodb-s2i.json | 629
-rw-r--r-- roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-mysql-persistent-s2i.json | 676
-rw-r--r-- roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-mysql-s2i.json | 636
-rw-r--r-- roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-postgresql-persistent-s2i.json | 649
-rw-r--r-- roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-postgresql-s2i.json | 609
-rw-r--r-- roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-basic-s2i.json | 279
-rw-r--r-- roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-https-s2i.json | 387
-rw-r--r-- roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json | 643
-rw-r--r-- roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-mongodb-s2i.json | 603
-rw-r--r-- roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json | 645
-rw-r--r-- roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-mysql-s2i.json | 605
-rw-r--r-- roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json | 618
-rw-r--r-- roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-postgresql-s2i.json | 578
-rw-r--r-- roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-basic-s2i.json | 279
-rw-r--r-- roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-https-s2i.json | 387
-rw-r--r-- roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json | 643
-rw-r--r-- roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-mongodb-s2i.json | 603
-rw-r--r-- roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json | 645
-rw-r--r-- roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-mysql-s2i.json | 605
-rw-r--r-- roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json | 618
-rw-r--r-- roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-postgresql-s2i.json | 576
-rw-r--r-- roles/openshift_examples/tasks/main.yml | 4
-rw-r--r-- roles/openshift_expand_partition/README.md | 2
-rw-r--r-- roles/openshift_expand_partition/tasks/main.yml | 5
-rwxr-xr-x roles/openshift_facts/library/openshift_facts.py | 130
-rw-r--r-- roles/openshift_facts/tasks/main.yml | 10
-rw-r--r-- roles/openshift_manageiq/tasks/main.yaml | 50
-rw-r--r-- roles/openshift_manageiq/vars/main.yml | 24
-rw-r--r-- roles/openshift_master/tasks/main.yml | 44
-rw-r--r-- roles/openshift_master/templates/master.yaml.v1.j2 | 28
-rw-r--r-- roles/openshift_master/templates/v1_partials/oauthConfig.j2 | 93
-rw-r--r-- roles/openshift_master_ca/tasks/main.yml | 6
-rw-r--r-- roles/openshift_node/tasks/main.yml | 21
-rw-r--r-- roles/openshift_node/tasks/storage_plugins/ceph.yml | 7
-rw-r--r-- roles/openshift_node/tasks/storage_plugins/glusterfs.yml | 16
-rw-r--r-- roles/openshift_node/templates/node.yaml.v1.j2 | 8
-rw-r--r-- roles/openshift_repos/files/fedora-origin/repos/maxamillion-fedora-openshift-fedora.repo | 8
-rw-r--r-- roles/openshift_repos/handlers/main.yml | 6
-rw-r--r-- roles/openshift_repos/tasks/main.yaml | 37
-rw-r--r-- roles/openshift_serviceaccounts/tasks/main.yml | 6
-rw-r--r-- roles/openshift_storage_nfs_lvm/tasks/nfs.yml | 5
-rw-r--r-- roles/os_env_extras/tasks/main.yaml | 7
-rw-r--r-- roles/os_firewall/tasks/firewall/firewalld.yml | 8
-rw-r--r-- roles/os_firewall/tasks/firewall/iptables.yml | 11
-rw-r--r-- roles/os_update_latest/tasks/main.yml | 5
-rw-r--r-- roles/os_zabbix/tasks/main.yml | 40
-rw-r--r-- roles/os_zabbix/vars/template_aws.yml | 2
-rw-r--r-- roles/os_zabbix/vars/template_openshift_master.yml | 220
-rw-r--r-- roles/os_zabbix/vars/template_os_linux.yml | 22
-rw-r--r-- roles/oso_host_monitoring/README.md | 50
-rw-r--r-- roles/oso_host_monitoring/defaults/main.yml | 1
-rw-r--r-- roles/oso_host_monitoring/handlers/main.yml | 12
-rw-r--r-- roles/oso_host_monitoring/meta/main.yml | 8
-rw-r--r-- roles/oso_host_monitoring/tasks/main.yml | 65
-rw-r--r-- roles/oso_host_monitoring/templates/docker-registry.ops.cfg.j2 | 1
-rw-r--r-- roles/oso_host_monitoring/templates/oso-f22-host-monitoring.service.j2 | 43
-rw-r--r-- roles/oso_host_monitoring/templates/oso-rhel7-zagg-client.service.j2 | 62
-rw-r--r-- roles/oso_host_monitoring/vars/main.yml | 1
-rw-r--r-- roles/rhel_subscribe/tasks/main.yml | 11
-rw-r--r-- roles/tito/README.md | 38
-rw-r--r-- roles/tito/defaults/main.yml | 2
-rw-r--r-- roles/tito/handlers/main.yml | 2
-rw-r--r-- roles/tito/meta/main.yml | 14
-rw-r--r-- roles/tito/tasks/main.yml | 4
-rw-r--r-- roles/tito/vars/main.yml | 2
-rw-r--r-- roles/yum_repos/README.md | 2
-rwxr-xr-x utils/site_assets/oo-install-bootstrap.sh | 9
-rw-r--r-- utils/src/ooinstall/cli_installer.py | 276
-rw-r--r-- utils/src/ooinstall/oo_config.py | 39
-rw-r--r-- utils/src/ooinstall/openshift_ansible.py | 95
-rw-r--r-- utils/src/ooinstall/variants.py | 6
-rw-r--r-- utils/test/cli_installer_tests.py | 567
-rw-r--r-- utils/test/fixture.py | 221
-rw-r--r-- utils/test/oo_config_tests.py | 34
249 files changed, 27722 insertions(+), 849 deletions(-)
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index c2f5784ce..ce566784c 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.0.12-1 ./
+3.0.16-1 ./
diff --git a/.tito/packages/openshift-ansible-bin b/.tito/packages/openshift-ansible-bin
deleted file mode 100644
index 5275dfcf9..000000000
--- a/.tito/packages/openshift-ansible-bin
+++ /dev/null
@@ -1 +0,0 @@
-0.0.21-1 bin/
diff --git a/.tito/packages/openshift-ansible-inventory b/.tito/packages/openshift-ansible-inventory
deleted file mode 100644
index 85502438a..000000000
--- a/.tito/packages/openshift-ansible-inventory
+++ /dev/null
@@ -1 +0,0 @@
-0.0.11-1 inventory/
diff --git a/.tito/releasers.conf b/.tito/releasers.conf
index f863ce9b1..a9116291a 100644
--- a/.tito/releasers.conf
+++ b/.tito/releasers.conf
@@ -11,3 +11,7 @@ srpm_disttag = .el7ose
releaser = tito.release.DistGitReleaser
branches = rhaos-3.1-rhel-7
srpm_disttag = .el7aos
+
+[copr-openshift-ansible]
+releaser = tito.release.CoprReleaser
+project_name = openshift-ansible
diff --git a/README.md b/README.md
index 635df36a0..cef2ed0b6 100644
--- a/README.md
+++ b/README.md
@@ -6,7 +6,7 @@ This repo contains Ansible code for OpenShift and Atomic Enterprise.
- Install base dependencies:
- Fedora:
```
- yum install -y ansible rubygem-thor rubygem-parseconfig util-linux
+ dnf install -y ansible rubygem-thor rubygem-parseconfig util-linux pyOpenSSL libffi-devel python-cryptography
```
- OSX:
```
diff --git a/README_AWS.md b/README_AWS.md
index 6757e2892..16ccb07e8 100644
--- a/README_AWS.md
+++ b/README_AWS.md
@@ -81,9 +81,20 @@ Node specific defaults:
- Docker volume type: gp2 (only applicable if ephemeral is false)
- Docker volume iops: 500 (only applicable when volume type is io1)
+Specifying ec2 instance type.
+All instances:
+- export ec2_instance_type='m4.large'
+Master instances:
+- export ec2_master_instance_type='m4.large'
+Infra node instances:
+- export ec2_infra_instance_type='m4.large'
+Non-infra node instances:
+- export ec2_node_instance_type='m4.large'
+etcd instances:
+- export ec2_etcd_instance_type='m4.large'
+
If needed, these values can be changed by setting environment variables on your system.
-- export ec2_instance_type='m4.large'
- export ec2_image='ami-307b3658'
- export ec2_region='us-east-1'
- export ec2_keypair='libra'
@@ -105,7 +116,7 @@ Install Dependencies
1. Ansible requires python-boto for aws operations:
RHEL/CentOS/Fedora
```
- yum install -y ansible python-boto
+ yum install -y ansible python-boto pyOpenSSL
```
OSX:
```
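The README_AWS.md additions above document a simple precedence: a host-type-specific variable such as `ec2_master_instance_type` overrides the global `ec2_instance_type`. A minimal Python sketch of that lookup order; the real resolution happens inside the Ansible cluster variables, and `resolve_instance_type` plus the default value here are purely illustrative:
```python
import os

def resolve_instance_type(host_type, default="m4.large"):
    """Illustrative only: prefer ec2_<host_type>_instance_type, then the
    global ec2_instance_type, then a hard-coded fallback."""
    specific = os.environ.get("ec2_{0}_instance_type".format(host_type))
    return specific or os.environ.get("ec2_instance_type") or default

os.environ["ec2_instance_type"] = "m4.large"
os.environ["ec2_etcd_instance_type"] = "m4.xlarge"
for host_type in ("master", "infra", "node", "etcd"):
    print("{0}: {1}".format(host_type, resolve_instance_type(host_type)))
```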
diff --git a/README_GCE.md b/README_GCE.md
index 50f8ade70..ea673b44d 100644
--- a/README_GCE.md
+++ b/README_GCE.md
@@ -43,7 +43,11 @@ Mandatory customization variables (check the values according to your tenant):
* zone = europe-west1-d
* network = default
* gce_machine_type = n1-standard-2
+* gce_machine_master_type = n1-standard-1
+* gce_machine_node_type = n1-standard-2
* gce_machine_image = preinstalled-slave-50g-v5
+* gce_machine_master_image = preinstalled-slave-50g-v5
+* gce_machine_node_image = preinstalled-slave-50g-v5
1. vi ~/.gce/gce.ini
@@ -56,7 +60,11 @@ gce_project_id = project_id
zone = europe-west1-d
network = default
gce_machine_type = n1-standard-2
+gce_machine_master_type = n1-standard-1
+gce_machine_node_type = n1-standard-2
gce_machine_image = preinstalled-slave-50g-v5
+gce_machine_master_image = preinstalled-slave-50g-v5
+gce_machine_node_image = preinstalled-slave-50g-v5
```
1. Define the environment variable GCE_INI_PATH so gce.py can pick it up and bin/cluster can also read it
diff --git a/README_origin.md b/README_origin.md
index cb213a93a..343ecda3d 100644
--- a/README_origin.md
+++ b/README_origin.md
@@ -39,6 +39,12 @@ subscription-manager repos \
```
* Configuration of router is not automated yet
* Configuration of docker-registry is not automated yet
+* Fedora 23+ doesn't come with python2 and will need a quick bootstrap. Setup
+ your inventory as described below and run the following (substituting the
+ `$PATH_TO_INVENTORY_FILE` with the actual path to your inventory file):
+```sh
+ansible-playbook ./playbooks/adhoc/bootstrap-fedora.yml -i $PATH_TO_INVENTORY_FILE
+```
## Configuring the host inventory
[Ansible docs](http://docs.ansible.com/intro_inventory.html)
@@ -59,6 +65,7 @@ nodes
# Set variables common for all OSEv3 hosts
[OSv3:vars]
+
# SSH user, this user should allow ssh based auth without requiring a password
ansible_ssh_user=root
@@ -75,6 +82,14 @@ osv3-master.example.com
[nodes]
osv3-master.example.com
osv3-node[1:2].example.com
+
+# host group for etcd
+[etcd]
+osv3-etcd[1:3].example.com
+
+[lb]
+osv3-lb.example.com
+
```
The hostnames above should resolve both from the hosts themselves and
diff --git a/README_vagrant.md b/README_vagrant.md
index f3e4cfc18..73fd31476 100644
--- a/README_vagrant.md
+++ b/README_vagrant.md
@@ -3,7 +3,6 @@ Requirements
- ansible (the latest 1.9 release is preferred, but any version greater than 1.9.1 should be sufficient).
- vagrant (tested against version 1.7.2)
- vagrant-hostmanager plugin (tested against version 1.5.0)
-- vagrant-registration plugin (only required for enterprise deployment type)
- vagrant-libvirt (tested against version 0.0.26)
- Only required if using libvirt instead of virtualbox
@@ -44,7 +43,8 @@ The following environment variables can be overriden:
- ``OPENSHIFT_DEPLOYMENT_TYPE`` (defaults to origin, choices: origin, enterprise, online)
- ``OPENSHIFT_NUM_NODES`` (the number of nodes to create, defaults to 2)
-For ``enterprise`` deployment types these env variables should also be specified:
+Note that if ``OPENSHIFT_DEPLOYMENT_TYPE`` is ``enterprise`` you should also specify environment variables related to ``subscription-manager`` which are used by the ``rhel_subscribe`` role:
+
- ``rhel_subscription_user``: rhsm user
- ``rhel_subscription_pass``: rhsm password
- (optional) ``rhel_subscription_pool``: poolID to attach a specific subscription besides what auto-attach detects
diff --git a/Vagrantfile b/Vagrantfile
index 33532cd63..362e1ff48 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -16,27 +16,6 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
config.hostmanager.include_offline = true
config.ssh.insert_key = false
- if deployment_type === 'enterprise'
- unless Vagrant.has_plugin?('vagrant-registration')
- raise 'vagrant-registration-plugin is required for enterprise deployment'
- end
- username = ENV['rhel_subscription_user']
- password = ENV['rhel_subscription_pass']
- unless username and password
- raise 'rhel_subscription_user and rhel_subscription_pass are required'
- end
- config.registration.username = username
- config.registration.password = password
- # FIXME this is temporary until vagrant/ansible registration modules
- # are capable of handling specific subscription pools
- if not ENV['rhel_subscription_pool'].nil?
- config.vm.provision "shell" do |s|
- s.inline = "subscription-manager attach --pool=$1 || true"
- s.args = "#{ENV['rhel_subscription_pool']}"
- end
- end
- end
-
config.vm.provider "virtualbox" do |vbox, override|
override.vm.box = "centos/7"
vbox.memory = 1024
diff --git a/bin/cluster b/bin/cluster
index 59a6755d3..9b02b4347 100755
--- a/bin/cluster
+++ b/bin/cluster
@@ -57,7 +57,7 @@ class Cluster(object):
"""
env = {'cluster_id': args.cluster_id,
'deployment_type': self.get_deployment_type(args)}
- playbook = "playbooks/{}/openshift-cluster/launch.yml".format(args.provider)
+ playbook = "playbooks/{0}/openshift-cluster/launch.yml".format(args.provider)
inventory = self.setup_provider(args.provider)
env['num_masters'] = args.masters
@@ -67,6 +67,21 @@ class Cluster(object):
self.action(args, inventory, env, playbook)
+ def addNodes(self, args):
+ """
+ Add nodes to an existing cluster for given provider
+ :param args: command line arguments provided by user
+ """
+ env = {'cluster_id': args.cluster_id,
+ 'deployment_type': self.get_deployment_type(args)}
+ playbook = "playbooks/{0}/openshift-cluster/addNodes.yml".format(args.provider)
+ inventory = self.setup_provider(args.provider)
+
+ env['num_nodes'] = args.nodes
+ env['num_infra'] = args.infra
+
+ self.action(args, inventory, env, playbook)
+
def terminate(self, args):
"""
Destroy OpenShift cluster
@@ -74,7 +89,7 @@ class Cluster(object):
"""
env = {'cluster_id': args.cluster_id,
'deployment_type': self.get_deployment_type(args)}
- playbook = "playbooks/{}/openshift-cluster/terminate.yml".format(args.provider)
+ playbook = "playbooks/{0}/openshift-cluster/terminate.yml".format(args.provider)
inventory = self.setup_provider(args.provider)
self.action(args, inventory, env, playbook)
@@ -86,7 +101,7 @@ class Cluster(object):
"""
env = {'cluster_id': args.cluster_id,
'deployment_type': self.get_deployment_type(args)}
- playbook = "playbooks/{}/openshift-cluster/list.yml".format(args.provider)
+ playbook = "playbooks/{0}/openshift-cluster/list.yml".format(args.provider)
inventory = self.setup_provider(args.provider)
self.action(args, inventory, env, playbook)
@@ -98,7 +113,7 @@ class Cluster(object):
"""
env = {'cluster_id': args.cluster_id,
'deployment_type': self.get_deployment_type(args)}
- playbook = "playbooks/{}/openshift-cluster/config.yml".format(args.provider)
+ playbook = "playbooks/{0}/openshift-cluster/config.yml".format(args.provider)
inventory = self.setup_provider(args.provider)
self.action(args, inventory, env, playbook)
@@ -110,7 +125,7 @@ class Cluster(object):
"""
env = {'cluster_id': args.cluster_id,
'deployment_type': self.get_deployment_type(args)}
- playbook = "playbooks/{}/openshift-cluster/update.yml".format(args.provider)
+ playbook = "playbooks/{0}/openshift-cluster/update.yml".format(args.provider)
inventory = self.setup_provider(args.provider)
self.action(args, inventory, env, playbook)
@@ -124,7 +139,7 @@ class Cluster(object):
'deployment_type': self.get_deployment_type(args),
'new_cluster_state': args.state}
- playbook = "playbooks/{}/openshift-cluster/service.yml".format(args.provider)
+ playbook = "playbooks/{0}/openshift-cluster/service.yml".format(args.provider)
inventory = self.setup_provider(args.provider)
self.action(args, inventory, env, playbook)
@@ -163,7 +178,7 @@ class Cluster(object):
boto_configs = [conf for conf in boto_conf_files if conf_exists(conf)]
if len(key_missing) > 0 and len(boto_configs) == 0:
- raise ValueError("PROVIDER aws requires {} environment variable(s). See README_AWS.md".format(key_missing))
+ raise ValueError("PROVIDER aws requires {0} environment variable(s). See README_AWS.md".format(key_missing))
elif 'libvirt' == provider:
inventory = '-i inventory/libvirt/hosts'
@@ -171,7 +186,7 @@ class Cluster(object):
inventory = '-i inventory/openstack/hosts'
else:
# this code should never be reached
- raise ValueError("invalid PROVIDER {}".format(provider))
+ raise ValueError("invalid PROVIDER {0}".format(provider))
return inventory
@@ -186,18 +201,18 @@ class Cluster(object):
verbose = ''
if args.verbose > 0:
- verbose = '-{}'.format('v' * args.verbose)
+ verbose = '-{0}'.format('v' * args.verbose)
if args.option:
for opt in args.option:
k, v = opt.split('=', 1)
env['cli_' + k] = v
- ansible_env = '-e \'{}\''.format(
+ ansible_env = '-e \'{0}\''.format(
' '.join(['%s=%s' % (key, value) for (key, value) in env.items()])
)
- command = 'ansible-playbook {} {} {} {}'.format(
+ command = 'ansible-playbook {0} {1} {2} {3}'.format(
verbose, inventory, ansible_env, playbook
)
@@ -205,16 +220,16 @@ class Cluster(object):
command = 'ANSIBLE_CALLBACK_PLUGINS=ansible-profile/callback_plugins ' + command
if args.verbose > 1:
- command = 'time {}'.format(command)
+ command = 'time {0}'.format(command)
if args.verbose > 0:
- sys.stderr.write('RUN [{}]\n'.format(command))
+ sys.stderr.write('RUN [{0}]\n'.format(command))
sys.stderr.flush()
try:
subprocess.check_call(command, shell=True)
except subprocess.CalledProcessError as exc:
- raise ActionFailed("ACTION [{}] failed: {}"
+ raise ActionFailed("ACTION [{0}] failed: {1}"
.format(args.action, exc))
@@ -292,6 +307,16 @@ if __name__ == '__main__':
help='number of external etcd hosts to create in cluster')
create_parser.set_defaults(func=cluster.create)
+
+ create_parser = action_parser.add_parser('addNodes', help='Add nodes to a cluster',
+ parents=[meta_parser])
+ create_parser.add_argument('-n', '--nodes', default=1, type=int,
+ help='number of nodes to add to the cluster')
+ create_parser.add_argument('-i', '--infra', default=1, type=int,
+ help='number of infra nodes to add to the cluster')
+ create_parser.set_defaults(func=cluster.addNodes)
+
+
config_parser = action_parser.add_parser('config',
help='Configure or reconfigure a cluster',
parents=[meta_parser])
@@ -325,14 +350,14 @@ if __name__ == '__main__':
args = parser.parse_args()
if 'terminate' == args.action and not args.force:
- answer = raw_input("This will destroy the ENTIRE {} environment. Are you sure? [y/N] ".format(args.cluster_id))
+ answer = raw_input("This will destroy the ENTIRE {0} environment. Are you sure? [y/N] ".format(args.cluster_id))
if answer not in ['y', 'Y']:
sys.stderr.write('\nACTION [terminate] aborted by user!\n')
exit(1)
if 'update' == args.action and not args.force:
answer = raw_input(
- "This is destructive and could corrupt {} environment. Continue? [y/N] ".format(args.cluster_id))
+ "This is destructive and could corrupt {0} environment. Continue? [y/N] ".format(args.cluster_id))
if answer not in ['y', 'Y']:
sys.stderr.write('\nACTION [update] aborted by user!\n')
exit(1)
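Besides adding the addNodes action, the bin/cluster changes above replace auto-numbered `{}` placeholders with explicit `{0}`-style indexes, presumably to keep the script usable on Python 2.6, where auto-numbering is not available. A quick standalone illustration (standard library only, nothing specific to this repo):
```python
provider = "aws"

# Works on Python 2.6 and later: fields are numbered explicitly.
print("playbooks/{0}/openshift-cluster/launch.yml".format(provider))

# Auto-numbered fields ('{}') were only added in Python 2.7; on 2.6 the same
# call raises "ValueError: zero length field name in format".
try:
    print("playbooks/{}/openshift-cluster/launch.yml".format(provider))
except ValueError as exc:
    print("older interpreter: {0}".format(exc))
```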
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index 9a17913c4..1a854f637 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -191,7 +191,11 @@ class FilterModule(object):
{ 'root':
{ 'volume_size': 10, 'device_type': 'gp2',
'iops': 500
- }
+ },
+ 'docker':
+ { 'volume_size': 40, 'device_type': 'gp2',
+ 'iops': 500, 'ephemeral': 'true'
+ }
},
'node':
{ 'root':
@@ -216,7 +220,7 @@ class FilterModule(object):
root_vol['delete_on_termination'] = True
if root_vol['device_type'] != 'io1':
root_vol.pop('iops', None)
- if host_type == 'node':
+ if host_type in ['master', 'node'] and 'docker' in data[host_type]:
docker_vol = data[host_type]['docker']
docker_vol['device_name'] = '/dev/xvdb'
docker_vol['delete_on_termination'] = True
@@ -227,7 +231,7 @@ class FilterModule(object):
docker_vol.pop('delete_on_termination', None)
docker_vol['ephemeral'] = 'ephemeral0'
return [root_vol, docker_vol]
- elif host_type == 'etcd':
+ elif host_type == 'etcd' and 'etcd' in data[host_type]:
etcd_vol = data[host_type]['etcd']
etcd_vol['device_name'] = '/dev/xvdb'
etcd_vol['delete_on_termination'] = True
@@ -346,27 +350,27 @@ class FilterModule(object):
@staticmethod
# pylint: disable=too-many-branches
- def oo_parse_certificate_names(certificates, data_dir, internal_hostnames):
+ def oo_parse_named_certificates(certificates, named_certs_dir, internal_hostnames):
''' Parses names from list of certificate hashes.
- Ex: certificates = [{ "certfile": "/etc/origin/master/custom1.crt",
- "keyfile": "/etc/origin/master/custom1.key" },
+ Ex: certificates = [{ "certfile": "/root/custom1.crt",
+ "keyfile": "/root/custom1.key" },
{ "certfile": "custom2.crt",
"keyfile": "custom2.key" }]
- returns [{ "certfile": "/etc/origin/master/custom1.crt",
- "keyfile": "/etc/origin/master/custom1.key",
+ returns [{ "certfile": "/etc/origin/master/named_certificates/custom1.crt",
+ "keyfile": "/etc/origin/master/named_certificates/custom1.key",
"names": [ "public-master-host.com",
"other-master-host.com" ] },
- { "certfile": "/etc/origin/master/custom2.crt",
- "keyfile": "/etc/origin/master/custom2.key",
+ { "certfile": "/etc/origin/master/named_certificates/custom2.crt",
+ "keyfile": "/etc/origin/master/named_certificates/custom2.key",
"names": [ "some-hostname.com" ] }]
'''
if not issubclass(type(certificates), list):
raise errors.AnsibleFilterError("|failed expects certificates is a list")
- if not issubclass(type(data_dir), unicode):
- raise errors.AnsibleFilterError("|failed expects data_dir is unicode")
+ if not issubclass(type(named_certs_dir), unicode):
+ raise errors.AnsibleFilterError("|failed expects named_certs_dir is unicode")
if not issubclass(type(internal_hostnames), list):
raise errors.AnsibleFilterError("|failed expects internal_hostnames is list")
@@ -399,8 +403,70 @@ class FilterModule(object):
raise errors.AnsibleFilterError(("|failed to parse certificate '%s' or " % certificate['certfile'] +
"detected a collision with internal hostname, please specify " +
"certificate names in host inventory"))
+
+ for certificate in certificates:
+ # Update paths for configuration
+ certificate['certfile'] = os.path.join(named_certs_dir, os.path.basename(certificate['certfile']))
+ certificate['keyfile'] = os.path.join(named_certs_dir, os.path.basename(certificate['keyfile']))
return certificates
+ @staticmethod
+ def oo_pretty_print_cluster(data):
+ ''' Read a subset of hostvars and build a summary of the cluster
+ in the following layout:
+
+"c_id": {
+ "master": [
+ { "name": "c_id-master-12345", "public IP": "172.16.0.1", "private IP": "192.168.0.1", "subtype": "default" }]
+ "node": [
+ { "name": "c_id-node-infra-23456", "public IP": "172.16.0.2", "private IP": "192.168.0.2", "subtype": "infra" },
+ { "name": "c_id-node-compute-23456", "public IP": "172.16.0.3", "private IP": "192.168.0.3", "subtype": "compute" },
+ ...
+ ]}
+ '''
+
+ def _get_tag_value(tags, key):
+ ''' Extract values of a map implemented as a set.
+ Ex: tags = { 'tag_foo_value1', 'tag_bar_value2', 'tag_baz_value3' }
+ key = 'bar'
+ returns 'value2'
+ '''
+ for tag in tags:
+ # Skip tag_env-host-type to avoid ambiguity with tag_env
+ if tag[:17] == 'tag_env-host-type':
+ continue
+ if tag[:len(key)+4] == 'tag_' + key:
+ return tag[len(key)+5:]
+ raise KeyError(key)
+
+ def _add_host(clusters,
+ env,
+ host_type,
+ sub_host_type,
+ host):
+ ''' Add a new host in the clusters data structure '''
+ if env not in clusters:
+ clusters[env] = {}
+ if host_type not in clusters[env]:
+ clusters[env][host_type] = {}
+ if sub_host_type not in clusters[env][host_type]:
+ clusters[env][host_type][sub_host_type] = []
+ clusters[env][host_type][sub_host_type].append(host)
+
+ clusters = {}
+ for host in data:
+ try:
+ _add_host(clusters=clusters,
+ env=_get_tag_value(host['group_names'], 'env'),
+ host_type=_get_tag_value(host['group_names'], 'host-type'),
+ sub_host_type=_get_tag_value(host['group_names'], 'sub-host-type'),
+ host={'name': host['inventory_hostname'],
+ 'public IP': host['ansible_ssh_host'],
+ 'private IP': host['ansible_default_ipv4']['address']})
+ except KeyError:
+ pass
+ return clusters
+
def filters(self):
''' returns a mapping of filters to methods '''
return {
@@ -417,6 +483,7 @@ class FilterModule(object):
"oo_split": self.oo_split,
"oo_filter_list": self.oo_filter_list,
"oo_parse_heat_stack_outputs": self.oo_parse_heat_stack_outputs,
- "oo_parse_certificate_names": self.oo_parse_certificate_names,
- "oo_haproxy_backend_masters": self.oo_haproxy_backend_masters
+ "oo_parse_named_certificates": self.oo_parse_named_certificates,
+ "oo_haproxy_backend_masters": self.oo_haproxy_backend_masters,
+ "oo_pretty_print_cluster": self.oo_pretty_print_cluster
}
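The new `oo_pretty_print_cluster` filter above builds its summary by decoding `tag_<key>_<value>` entries from each host's `group_names`. A self-contained copy of its tag-parsing helper, run against hypothetical group names, shows the convention it relies on:
```python
def get_tag_value(tags, key):
    """Same logic as the filter's _get_tag_value: return 'value2' for
    key='bar' given a group named 'tag_bar_value2'."""
    for tag in tags:
        # Skip tag_env-host-type to avoid ambiguity with tag_env
        if tag[:17] == 'tag_env-host-type':
            continue
        if tag[:len(key) + 4] == 'tag_' + key:
            return tag[len(key) + 5:]
    raise KeyError(key)

groups = ['tag_env_prod', 'tag_host-type_node', 'tag_sub-host-type_infra',
          'tag_env-host-type_prod-openshift-node']
print(get_tag_value(groups, 'env'))            # prod
print(get_tag_value(groups, 'host-type'))      # node
print(get_tag_value(groups, 'sub-host-type'))  # infra
```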
diff --git a/filter_plugins/openshift_master.py b/filter_plugins/openshift_master.py
new file mode 100644
index 000000000..f12017967
--- /dev/null
+++ b/filter_plugins/openshift_master.py
@@ -0,0 +1,469 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# vim: expandtab:tabstop=4:shiftwidth=4
+'''
+Custom filters for use in openshift-master
+'''
+import copy
+import sys
+import yaml
+
+from ansible import errors
+from ansible.runner.filter_plugins.core import bool as ansible_bool
+
+
+class IdentityProviderBase(object):
+ """ IdentityProviderBase
+
+ Attributes:
+ name (str): Identity provider Name
+ login (bool): Is this identity provider a login provider?
+ challenge (bool): Is this identity provider a challenge provider?
+ provider (dict): Provider specific config
+ _idp (dict): internal copy of the IDP dict passed in
+ _required (list): List of lists of strings for required attributes
+ _optional (list): List of lists of strings for optional attributes
+ _allow_additional (bool): Does this provider support attributes
+ not in _required and _optional
+
+ Args:
+ api_version(str): OpenShift config version
+ idp (dict): idp config dict
+
+ Raises:
+ AnsibleFilterError:
+ """
+ # disabling this check since the number of instance attributes are
+ # necessary for this class
+ # pylint: disable=too-many-instance-attributes
+ def __init__(self, api_version, idp):
+ if api_version not in ['v1']:
+ raise errors.AnsibleFilterError("|failed api version {0} unknown".format(api_version))
+
+ self._idp = copy.deepcopy(idp)
+
+ if 'name' not in self._idp:
+ raise errors.AnsibleFilterError("|failed identity provider missing a name")
+
+ if 'kind' not in self._idp:
+ raise errors.AnsibleFilterError("|failed identity provider missing a kind")
+
+ self.name = self._idp.pop('name')
+ self.login = ansible_bool(self._idp.pop('login', False))
+ self.challenge = ansible_bool(self._idp.pop('challenge', False))
+ self.provider = dict(apiVersion=api_version, kind=self._idp.pop('kind'))
+
+ self._required = [['mappingMethod', 'mapping_method']]
+ self._optional = []
+ self._allow_additional = True
+
+ @staticmethod
+ def validate_idp_list(idp_list):
+ ''' validates a list of idps '''
+ login_providers = [x.name for x in idp_list if x.login]
+ if len(login_providers) > 1:
+ raise errors.AnsibleFilterError("|failed multiple providers are "
+ "not allowed for login. login "
+ "providers: {0}".format(', '.join(login_providers)))
+
+ names = [x.name for x in idp_list]
+ if len(set(names)) != len(names):
+ raise errors.AnsibleFilterError("|failed more than one provider configured with the same name")
+
+ for idp in idp_list:
+ idp.validate()
+
+ def validate(self):
+ ''' validate an instance of this idp class '''
+ valid_mapping_methods = ['add', 'claim', 'generate', 'lookup']
+ if self.provider['mappingMethod'] not in valid_mapping_methods:
+ raise errors.AnsibleFilterError("|failed unkown mapping method "
+ "for provider {0}".format(self.__class__.__name__))
+
+ @staticmethod
+ def get_default(key):
+ ''' get a default value for a given key '''
+ if key == 'mappingMethod':
+ return 'claim'
+ else:
+ return None
+
+ def set_provider_item(self, items, required=False):
+ ''' set a provider item based on the list of item names provided. '''
+ for item in items:
+ provider_key = items[0]
+ if item in self._idp:
+ self.provider[provider_key] = self._idp.pop(item)
+ break
+ else:
+ default = self.get_default(provider_key)
+ if default is not None:
+ self.provider[provider_key] = default
+ elif required:
+ raise errors.AnsibleFilterError("|failed provider {0} missing "
+ "required key {1}".format(self.__class__.__name__, provider_key))
+
+ def set_provider_items(self):
+ ''' set the provider items for this idp '''
+ for items in self._required:
+ self.set_provider_item(items, True)
+ for items in self._optional:
+ self.set_provider_item(items)
+ if self._allow_additional:
+ for key in self._idp.keys():
+ self.set_provider_item([key])
+ else:
+ if len(self._idp) > 0:
+ raise errors.AnsibleFilterError("|failed provider {0} "
+ "contains unknown keys "
+ "{1}".format(self.__class__.__name__, ', '.join(self._idp.keys())))
+
+ def to_dict(self):
+ ''' translate this idp to a dictionary '''
+ return dict(name=self.name, challenge=self.challenge,
+ login=self.login, provider=self.provider)
+
+
+class LDAPPasswordIdentityProvider(IdentityProviderBase):
+ """ LDAPPasswordIdentityProvider
+
+ Attributes:
+
+ Args:
+ api_version(str): OpenShift config version
+ idp (dict): idp config dict
+
+ Raises:
+ AnsibleFilterError:
+ """
+ def __init__(self, api_version, idp):
+ IdentityProviderBase.__init__(self, api_version, idp)
+ self._allow_additional = False
+ self._required += [['attributes'], ['url'], ['insecure']]
+ self._optional += [['ca'],
+ ['bindDN', 'bind_dn'],
+ ['bindPassword', 'bind_password']]
+
+ self._idp['insecure'] = ansible_bool(self._idp.pop('insecure', False))
+
+ if 'attributes' in self._idp and 'preferred_username' in self._idp['attributes']:
+ pref_user = self._idp['attributes'].pop('preferred_username')
+ self._idp['attributes']['preferredUsername'] = pref_user
+
+ def validate(self):
+ ''' validate this idp instance '''
+ IdentityProviderBase.validate(self)
+ if not isinstance(self.provider['attributes'], dict):
+ raise errors.AnsibleFilterError("|failed attributes for provider "
+ "{0} must be a dictionary".format(self.__class__.__name__))
+
+ attrs = ['id', 'email', 'name', 'preferredUsername']
+ for attr in attrs:
+ if attr in self.provider['attributes'] and not isinstance(self.provider['attributes'][attr], list):
+ raise errors.AnsibleFilterError("|failed {0} attribute for "
+ "provider {1} must be a list".format(attr, self.__class__.__name__))
+
+ unknown_attrs = set(self.provider['attributes'].keys()) - set(attrs)
+ if len(unknown_attrs) > 0:
+ raise errors.AnsibleFilterError("|failed provider {0} has unknown "
+ "attributes: {1}".format(self.__class__.__name__, ', '.join(unknown_attrs)))
+
+
+class KeystonePasswordIdentityProvider(IdentityProviderBase):
+ """ KeystoneIdentityProvider
+
+ Attributes:
+
+ Args:
+ api_version(str): OpenShift config version
+ idp (dict): idp config dict
+
+ Raises:
+ AnsibleFilterError:
+ """
+ def __init__(self, api_version, idp):
+ IdentityProviderBase.__init__(self, api_version, idp)
+ self._allow_additional = False
+ self._required += [['url'], ['domainName', 'domain_name']]
+ self._optional += [['ca'], ['certFile', 'cert_file'], ['keyFile', 'key_file']]
+
+
+class RequestHeaderIdentityProvider(IdentityProviderBase):
+ """ RequestHeaderIdentityProvider
+
+ Attributes:
+
+ Args:
+ api_version(str): OpenShift config version
+ idp (dict): idp config dict
+
+ Raises:
+ AnsibleFilterError:
+ """
+ def __init__(self, api_version, idp):
+ IdentityProviderBase.__init__(self, api_version, idp)
+ self._allow_additional = False
+ self._required += [['headers']]
+ self._optional += [['challengeURL', 'challenge_url'],
+ ['loginURL', 'login_url'],
+ ['clientCA', 'client_ca']]
+
+ def validate(self):
+ ''' validate this idp instance '''
+ IdentityProviderBase.validate(self)
+ if not isinstance(self.provider['headers'], list):
+ raise errors.AnsibleFilterError("|failed headers for provider {0} "
+ "must be a list".format(self.__class__.__name__))
+
+
+class AllowAllPasswordIdentityProvider(IdentityProviderBase):
+ """ AllowAllPasswordIdentityProvider
+
+ Attributes:
+
+ Args:
+ api_version(str): OpenShift config version
+ idp (dict): idp config dict
+
+ Raises:
+ AnsibleFilterError:
+ """
+ def __init__(self, api_version, idp):
+ IdentityProviderBase.__init__(self, api_version, idp)
+ self._allow_additional = False
+
+
+class DenyAllPasswordIdentityProvider(IdentityProviderBase):
+ """ DenyAllPasswordIdentityProvider
+
+ Attributes:
+
+ Args:
+ api_version(str): OpenShift config version
+ idp (dict): idp config dict
+
+ Raises:
+ AnsibleFilterError:
+ """
+ def __init__(self, api_version, idp):
+ IdentityProviderBase.__init__(self, api_version, idp)
+ self._allow_additional = False
+
+
+class HTPasswdPasswordIdentityProvider(IdentityProviderBase):
+ """ HTPasswdPasswordIdentity
+
+ Attributes:
+
+ Args:
+ api_version(str): OpenShift config version
+ idp (dict): idp config dict
+
+ Raises:
+ AnsibleFilterError:
+ """
+ def __init__(self, api_version, idp):
+ IdentityProviderBase.__init__(self, api_version, idp)
+ self._allow_additional = False
+ self._required += [['file', 'filename', 'fileName', 'file_name']]
+
+ @staticmethod
+ def get_default(key):
+ if key == 'file':
+ return '/etc/origin/htpasswd'
+ else:
+ return IdentityProviderBase.get_default(key)
+
+
+class BasicAuthPasswordIdentityProvider(IdentityProviderBase):
+ """ BasicAuthPasswordIdentityProvider
+
+ Attributes:
+
+ Args:
+ api_version(str): OpenShift config version
+ idp (dict): idp config dict
+
+ Raises:
+ AnsibleFilterError:
+ """
+ def __init__(self, api_version, idp):
+ IdentityProviderBase.__init__(self, api_version, idp)
+ self._allow_additional = False
+ self._required += [['url']]
+ self._optional += [['ca'], ['certFile', 'cert_file'], ['keyFile', 'key_file']]
+
+
+class IdentityProviderOauthBase(IdentityProviderBase):
+ """ IdentityProviderOauthBase
+
+ Attributes:
+
+ Args:
+ api_version(str): OpenShift config version
+ idp (dict): idp config dict
+
+ Raises:
+ AnsibleFilterError:
+ """
+ def __init__(self, api_version, idp):
+ IdentityProviderBase.__init__(self, api_version, idp)
+ self._allow_additional = False
+ self._required += [['clientID', 'client_id'], ['clientSecret', 'client_secret']]
+
+ def validate(self):
+ ''' validate this idp instance '''
+ IdentityProviderBase.validate(self)
+ if self.challenge:
+ raise errors.AnsibleFilterError("|failed provider {0} does not "
+ "allow challenge authentication".format(self.__class__.__name__))
+
+
+class OpenIDIdentityProvider(IdentityProviderOauthBase):
+ """ OpenIDIdentityProvider
+
+ Attributes:
+
+ Args:
+ api_version(str): OpenShift config version
+ idp (dict): idp config dict
+
+ Raises:
+ AnsibleFilterError:
+ """
+ def __init__(self, api_version, idp):
+ IdentityProviderOauthBase.__init__(self, api_version, idp)
+ self._required += [['claims'], ['urls']]
+ self._optional += [['ca'],
+ ['extraScopes'],
+ ['extraAuthorizeParameters']]
+ if 'claims' in self._idp and 'preferred_username' in self._idp['claims']:
+ pref_user = self._idp['claims'].pop('preferred_username')
+ self._idp['claims']['preferredUsername'] = pref_user
+ if 'urls' in self._idp and 'user_info' in self._idp['urls']:
+ user_info = self._idp['urls'].pop('user_info')
+ self._idp['urls']['userInfo'] = user_info
+ if 'extra_scopes' in self._idp:
+ self._idp['extraScopes'] = self._idp.pop('extra_scopes')
+ if 'extra_authorize_parameters' in self._idp:
+ self._idp['extraAuthorizeParameters'] = self._idp.pop('extra_authorize_parameters')
+
+ if 'extraAuthorizeParameters' in self._idp:
+ if 'include_granted_scopes' in self._idp['extraAuthorizeParameters']:
+ val = ansible_bool(self._idp['extraAuthorizeParameters'].pop('include_granted_scopes'))
+ self._idp['extraAuthorizeParameters']['include_granted_scopes'] = val
+
+
+ def validate(self):
+ ''' validate this idp instance '''
+ IdentityProviderOauthBase.validate(self)
+ if not isinstance(self.provider['claims'], dict):
+ raise errors.AnsibleFilterError("|failed claims for provider {0} "
+ "must be a dictionary".format(self.__class__.__name__))
+
+ if 'extraScopes' not in self.provider['extraScopes'] and not isinstance(self.provider['extraScopes'], list):
+ raise errors.AnsibleFilterError("|failed extraScopes for provider "
+ "{0} must be a list".format(self.__class__.__name__))
+ if ('extraAuthorizeParameters' not in self.provider['extraAuthorizeParameters']
+ and not isinstance(self.provider['extraAuthorizeParameters'], dict)):
+ raise errors.AnsibleFilterError("|failed extraAuthorizeParameters "
+ "for provider {0} must be a dictionary".format(self.__class__.__name__))
+
+ required_claims = ['id']
+ optional_claims = ['email', 'name', 'preferredUsername']
+ all_claims = required_claims + optional_claims
+
+ for claim in required_claims:
+ if claim in required_claims and claim not in self.provider['claims']:
+ raise errors.AnsibleFilterError("|failed {0} claim missing "
+ "for provider {1}".format(claim, self.__class__.__name__))
+
+ for claim in all_claims:
+ if claim in self.provider['claims'] and not isinstance(self.provider['claims'][claim], list):
+ raise errors.AnsibleFilterError("|failed {0} claims for "
+ "provider {1} must be a list".format(claim, self.__class__.__name__))
+
+ unknown_claims = set(self.provider['claims'].keys()) - set(all_claims)
+ if len(unknown_claims) > 0:
+ raise errors.AnsibleFilterError("|failed provider {0} has unknown "
+ "claims: {1}".format(self.__class__.__name__, ', '.join(unknown_claims)))
+
+ if not isinstance(self.provider['urls'], dict):
+ raise errors.AnsibleFilterError("|failed urls for provider {0} "
+ "must be a dictionary".format(self.__class__.__name__))
+
+ required_urls = ['authorize', 'token']
+ optional_urls = ['userInfo']
+ all_urls = required_urls + optional_urls
+
+ for url in required_urls:
+ if url not in self.provider['urls']:
+ raise errors.AnsibleFilterError("|failed {0} url missing for "
+ "provider {1}".format(url, self.__class__.__name__))
+
+ unknown_urls = set(self.provider['urls'].keys()) - set(all_urls)
+ if len(unknown_urls) > 0:
+ raise errors.AnsibleFilterError("|failed provider {0} has unknown "
+ "urls: {1}".format(self.__class__.__name__, ', '.join(unknown_urls)))
+
+
+class GoogleIdentityProvider(IdentityProviderOauthBase):
+ """ GoogleIdentityProvider
+
+ Attributes:
+
+ Args:
+ api_version(str): OpenShift config version
+ idp (dict): idp config dict
+
+ Raises:
+ AnsibleFilterError:
+ """
+ def __init__(self, api_version, idp):
+ IdentityProviderOauthBase.__init__(self, api_version, idp)
+ self._optional += [['hostedDomain', 'hosted_domain']]
+
+
+class GitHubIdentityProvider(IdentityProviderOauthBase):
+ """ GitHubIdentityProvider
+
+ Attributes:
+
+ Args:
+ api_version(str): OpenShift config version
+ idp (dict): idp config dict
+
+ Raises:
+ AnsibleFilterError:
+ """
+ pass
+
+
+class FilterModule(object):
+ ''' Custom ansible filters for use by the openshift_master role'''
+
+ @staticmethod
+ def translate_idps(idps, api_version):
+ ''' Translates a list of dictionaries into a valid identityProviders config '''
+ idp_list = []
+
+ if not isinstance(idps, list):
+ raise errors.AnsibleFilterError("|failed expects to filter on a list of identity providers")
+ for idp in idps:
+ if not isinstance(idp, dict):
+ raise errors.AnsibleFilterError("|failed identity providers must be a list of dictionaries")
+
+ cur_module = sys.modules[__name__]
+ idp_class = getattr(cur_module, idp['kind'], None)
+ idp_inst = idp_class(api_version, idp) if idp_class is not None else IdentityProviderBase(api_version, idp)
+ idp_inst.set_provider_items()
+ idp_list.append(idp_inst)
+
+
+ IdentityProviderBase.validate_idp_list(idp_list)
+ return yaml.safe_dump([idp.to_dict() for idp in idp_list], default_flow_style=False)
+
+
+ def filters(self):
+ ''' returns a mapping of filters to methods '''
+ return {"translate_idps": self.translate_idps}
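For reference, a minimal sketch of exercising the translate_idps filter above directly from Python. The import path and the 'v1' api_version are assumptions for illustration, the idp dict mirrors the htpasswd example in inventory/byo/hosts.example further below, and the HTPasswdPasswordIdentityProvider class is defined earlier in filter_plugins/openshift_master.py, outside the hunk shown:

    # Hedged sketch: drive the filter directly; 'v1' and the import path
    # are assumptions, not part of this patch.
    from openshift_master import FilterModule

    idps = [{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true',
             'kind': 'HTPasswdPasswordIdentityProvider',
             'filename': '/etc/openshift/htpasswd'}]

    # Unknown 'kind' values fall back to IdentityProviderBase, per translate_idps.
    print(FilterModule.translate_idps(idps, 'v1'))
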
diff --git a/git/pylint.sh b/git/pylint.sh
index 55e8b6131..f29c055dc 100755
--- a/git/pylint.sh
+++ b/git/pylint.sh
@@ -40,6 +40,8 @@ for PY_FILE in $PY_DIFF; do
fi
done
+export PYTHONPATH=${WORKSPACE}/utils/src/:${WORKSPACE}/utils/test/
+
if [ "${FILES_TO_TEST}" != "" ]; then
echo "Testing files: ${FILES_TO_TEST}"
exec ${PYTHON} -m pylint --rcfile ${WORKSPACE}/git/.pylintrc ${FILES_TO_TEST}
diff --git a/inventory/aws/hosts/ec2.ini b/inventory/aws/hosts/ec2.ini
index eaab0a410..1f503b8cf 100644
--- a/inventory/aws/hosts/ec2.ini
+++ b/inventory/aws/hosts/ec2.ini
@@ -24,24 +24,61 @@ regions_exclude = us-gov-west-1,cn-north-1
# This is the normal destination variable to use. If you are running Ansible
# from outside EC2, then 'public_dns_name' makes the most sense. If you are
# running Ansible from within EC2, then perhaps you want to use the internal
-# address, and should set this to 'private_dns_name'.
+# address, and should set this to 'private_dns_name'. The key of an EC2 tag
+# may optionally be used; however the boto instance variables hold precedence
+# in the event of a collision.
destination_variable = public_dns_name
# For server inside a VPC, using DNS names may not make sense. When an instance
# has 'subnet_id' set, this variable is used. If the subnet is public, setting
# this to 'ip_address' will return the public IP address. For instances in a
# private subnet, this should be set to 'private_ip_address', and Ansible must
-# be run from with EC2.
+# be run from within EC2. The key of an EC2 tag may optionally be used; however
+# the boto instance variables hold precedence in the event of a collision.
+# WARNING: instances in a private VPC subnet _without_ a public IP address
+# will not be listed in the inventory unless you set:
+# vpc_destination_variable = 'private_ip_address'
vpc_destination_variable = ip_address
# To tag instances on EC2 with the resource records that point to them from
# Route53, uncomment and set 'route53' to True.
route53 = False
+# To exclude RDS instances from the inventory, uncomment and set to False.
+#rds = False
+
+# To exclude ElastiCache instances from the inventory, uncomment and set to False.
+#elasticache = False
+
# Additionally, you can specify the list of zones to exclude looking up in
# 'route53_excluded_zones' as a comma-separated list.
# route53_excluded_zones = samplezone1.com, samplezone2.com
+# By default, only EC2 instances in the 'running' state are returned. Set
+# 'all_instances' to True to return all instances regardless of state.
+all_instances = False
+
+# By default, only EC2 instances in the 'running' state are returned. Specify
+# EC2 instance states to return as a comma-separated list. This
+# option is overridden when 'all_instances' is True.
+# instance_states = pending, running, shutting-down, terminated, stopping, stopped
+
+# By default, only RDS instances in the 'available' state are returned. Set
+# 'all_rds_instances' to True to return all RDS instances, regardless of state.
+all_rds_instances = False
+
+# By default, only ElastiCache clusters and nodes in the 'available' state
+# are returned. Set 'all_elasticache_clusters' and/or 'all_elasticache_nodes'
+# to True to return all ElastiCache clusters and nodes, regardless of state.
+#
+# Note that all_elasticache_nodes only applies to listed clusters. That means
+# if you set all_elasticache_clusters to False, no nodes will be returned from
+# unavailable clusters, regardless of their state or the value of
+# all_elasticache_nodes.
+all_elasticache_replication_groups = False
+all_elasticache_clusters = False
+all_elasticache_nodes = False
+
# API calls to EC2 are slow. For this reason, we cache the results of an API
# call. Set this to the path you want cache files to be written to. Two files
# will be written to this directory:
@@ -60,3 +97,59 @@ cache_max_age = 300
# destination_variable and vpc_destination_variable.
# destination_format = {0}.{1}.rhcloud.com
# destination_format_tags = Name,environment
+
+# Organize groups into a nested hierarchy instead of a flat namespace.
+nested_groups = False
+
+# Replace dashes (-) with underscores when creating group names to avoid issues with ansible
+replace_dash_in_groups = False
+
+# The EC2 inventory output can become very large. To manage its size,
+# configure which groups should be created.
+group_by_instance_id = True
+group_by_region = True
+group_by_availability_zone = True
+group_by_ami_id = True
+group_by_instance_type = True
+group_by_key_pair = True
+group_by_vpc_id = True
+group_by_security_group = True
+group_by_tag_keys = True
+group_by_tag_none = True
+group_by_route53_names = True
+group_by_rds_engine = True
+group_by_rds_parameter_group = True
+group_by_elasticache_engine = True
+group_by_elasticache_cluster = True
+group_by_elasticache_parameter_group = True
+group_by_elasticache_replication_group = True
+
+# If you only want to include hosts that match a certain regular expression
+# pattern_include = staging-*
+
+# If you want to exclude any hosts that match a certain regular expression
+# pattern_exclude = staging-*
+
+# Instance filters can be used to control which instances are retrieved for
+# inventory. For the full list of possible filters, please read the EC2 API
+# docs: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeInstances.html#query-DescribeInstances-filters
+# Filters are key/value pairs separated by '=', to list multiple filters use
+# a list separated by commas. See examples below.
+
+# Retrieve only instances with (key=value) env=staging tag
+# instance_filters = tag:env=staging
+
+# Retrieve only instances with role=webservers OR role=dbservers tag
+# instance_filters = tag:role=webservers,tag:role=dbservers
+
+# Retrieve only t1.micro instances OR instances with tag env=staging
+# instance_filters = instance-type=t1.micro,tag:env=staging
+
+# You can also use wildcards in filter values. The example below lists
+# instances whose Name tag matches webservers1*
+# (e.g. webservers15, webservers1a, webservers123, etc.)
+# instance_filters = tag:Name=webservers1*
+
+# A boto configuration profile may be used to separate out credentials
+# see http://boto.readthedocs.org/en/latest/boto_config_tut.html
+# boto_profile = some-boto-profile-name
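For reference, the instance_filters values above end up as boto filter arguments; a rough, hedged sketch of that mapping follows (the actual parsing lives in ec2.py, patched below, and the region and filter values here are examples only):

    # Hedged sketch: how 'tag:key=value' filter pairs map onto boto calls.
    from boto import ec2

    instance_filters = "tag:env=staging,tag:role=webservers"

    filters = {}
    for pair in instance_filters.split(','):
        filter_key, filter_value = pair.split('=', 1)
        filters.setdefault(filter_key, []).append(filter_value)

    conn = ec2.connect_to_region('us-east-1')  # region chosen for illustration
    reservations = []
    for filter_key, filter_values in filters.items():
        # mirrors the get_instances_by_region loop in ec2.py below
        reservations.extend(conn.get_all_instances(filters={filter_key: filter_values}))
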
diff --git a/inventory/aws/hosts/ec2.py b/inventory/aws/hosts/ec2.py
index f231ff4c2..8b878cafd 100755
--- a/inventory/aws/hosts/ec2.py
+++ b/inventory/aws/hosts/ec2.py
@@ -22,6 +22,12 @@ you need to define:
export EC2_URL=http://hostname_of_your_cc:port/services/Eucalyptus
+If you're using boto profiles (requires boto>=2.24.0) you can choose a profile
+using the --boto-profile command line argument (e.g. ec2.py --boto-profile prod) or using
+the AWS_PROFILE variable:
+
+ AWS_PROFILE=prod ansible-playbook -i ec2.py myplaybook.yml
+
For more details, see: http://docs.pythonboto.org/en/latest/boto_config_tut.html
When run against a specific host, this script returns the following variables:
@@ -121,8 +127,11 @@ from time import time
import boto
from boto import ec2
from boto import rds
+from boto import elasticache
from boto import route53
-import ConfigParser
+import six
+
+from six.moves import configparser
from collections import defaultdict
try:
@@ -145,9 +154,18 @@ class Ec2Inventory(object):
# Index of hostname (address) to instance ID
self.index = {}
+ # Boto profile to use (if any)
+ self.boto_profile = None
+
# Read settings and parse CLI arguments
- self.read_settings()
self.parse_cli_args()
+ self.read_settings()
+
+        # Make sure that profile_name is not passed at all if not set,
+        # as boto versions before 2.24 will fall over otherwise
+ if self.boto_profile:
+ if not hasattr(boto.ec2.EC2Connection, 'profile_name'):
+ self.fail_with_error("boto version must be >= 2.24 to use profile")
# Cache
if self.args.refresh_cache:
@@ -166,7 +184,7 @@ class Ec2Inventory(object):
else:
data_to_print = self.json_format_dict(self.inventory, True)
- print data_to_print
+ print(data_to_print)
def is_cache_valid(self):
@@ -184,10 +202,12 @@ class Ec2Inventory(object):
def read_settings(self):
''' Reads the settings from the ec2.ini file '''
-
- config = ConfigParser.SafeConfigParser()
+ if six.PY3:
+ config = configparser.ConfigParser()
+ else:
+ config = configparser.SafeConfigParser()
ec2_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ec2.ini')
- ec2_ini_path = os.environ.get('EC2_INI_PATH', ec2_default_ini_path)
+ ec2_ini_path = os.path.expanduser(os.path.expandvars(os.environ.get('EC2_INI_PATH', ec2_default_ini_path)))
config.read(ec2_ini_path)
# is eucalyptus?
@@ -236,18 +256,72 @@ class Ec2Inventory(object):
if config.has_option('ec2', 'rds'):
self.rds_enabled = config.getboolean('ec2', 'rds')
- # Return all EC2 and RDS instances (if RDS is enabled)
+ # Include ElastiCache instances?
+ self.elasticache_enabled = True
+ if config.has_option('ec2', 'elasticache'):
+ self.elasticache_enabled = config.getboolean('ec2', 'elasticache')
+
+ # Return all EC2 instances?
if config.has_option('ec2', 'all_instances'):
self.all_instances = config.getboolean('ec2', 'all_instances')
else:
self.all_instances = False
+
+ # Instance states to be gathered in inventory. Default is 'running'.
+ # Setting 'all_instances' to 'yes' overrides this option.
+ ec2_valid_instance_states = [
+ 'pending',
+ 'running',
+ 'shutting-down',
+ 'terminated',
+ 'stopping',
+ 'stopped'
+ ]
+ self.ec2_instance_states = []
+ if self.all_instances:
+ self.ec2_instance_states = ec2_valid_instance_states
+ elif config.has_option('ec2', 'instance_states'):
+ for instance_state in config.get('ec2', 'instance_states').split(','):
+ instance_state = instance_state.strip()
+ if instance_state not in ec2_valid_instance_states:
+ continue
+ self.ec2_instance_states.append(instance_state)
+ else:
+ self.ec2_instance_states = ['running']
+
+ # Return all RDS instances? (if RDS is enabled)
if config.has_option('ec2', 'all_rds_instances') and self.rds_enabled:
self.all_rds_instances = config.getboolean('ec2', 'all_rds_instances')
else:
self.all_rds_instances = False
+ # Return all ElastiCache replication groups? (if ElastiCache is enabled)
+ if config.has_option('ec2', 'all_elasticache_replication_groups') and self.elasticache_enabled:
+ self.all_elasticache_replication_groups = config.getboolean('ec2', 'all_elasticache_replication_groups')
+ else:
+ self.all_elasticache_replication_groups = False
+
+ # Return all ElastiCache clusters? (if ElastiCache is enabled)
+ if config.has_option('ec2', 'all_elasticache_clusters') and self.elasticache_enabled:
+ self.all_elasticache_clusters = config.getboolean('ec2', 'all_elasticache_clusters')
+ else:
+ self.all_elasticache_clusters = False
+
+ # Return all ElastiCache nodes? (if ElastiCache is enabled)
+ if config.has_option('ec2', 'all_elasticache_nodes') and self.elasticache_enabled:
+ self.all_elasticache_nodes = config.getboolean('ec2', 'all_elasticache_nodes')
+ else:
+ self.all_elasticache_nodes = False
+
+ # boto configuration profile (prefer CLI argument)
+ self.boto_profile = self.args.boto_profile
+ if config.has_option('ec2', 'boto_profile') and not self.boto_profile:
+ self.boto_profile = config.get('ec2', 'boto_profile')
+
# Cache related
cache_dir = os.path.expanduser(config.get('ec2', 'cache_path'))
+ if self.boto_profile:
+ cache_dir = os.path.join(cache_dir, 'profile_' + self.boto_profile)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
@@ -261,6 +335,12 @@ class Ec2Inventory(object):
else:
self.nested_groups = False
+ # Replace dash or not in group names
+ if config.has_option('ec2', 'replace_dash_in_groups'):
+ self.replace_dash_in_groups = config.getboolean('ec2', 'replace_dash_in_groups')
+ else:
+ self.replace_dash_in_groups = True
+
# Configure which groups should be created.
group_by_options = [
'group_by_instance_id',
@@ -276,6 +356,10 @@ class Ec2Inventory(object):
'group_by_route53_names',
'group_by_rds_engine',
'group_by_rds_parameter_group',
+ 'group_by_elasticache_engine',
+ 'group_by_elasticache_cluster',
+ 'group_by_elasticache_parameter_group',
+ 'group_by_elasticache_replication_group',
]
for option in group_by_options:
if config.has_option('ec2', option):
@@ -290,7 +374,7 @@ class Ec2Inventory(object):
self.pattern_include = re.compile(pattern_include)
else:
self.pattern_include = None
- except ConfigParser.NoOptionError, e:
+ except configparser.NoOptionError:
self.pattern_include = None
# Do we need to exclude hosts that match a pattern?
@@ -300,7 +384,7 @@ class Ec2Inventory(object):
self.pattern_exclude = re.compile(pattern_exclude)
else:
self.pattern_exclude = None
- except ConfigParser.NoOptionError, e:
+ except configparser.NoOptionError:
self.pattern_exclude = None
# Instance filters (see boto and EC2 API docs). Ignore invalid filters.
@@ -325,6 +409,8 @@ class Ec2Inventory(object):
help='Get all the variables about a specific instance')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)')
+ parser.add_argument('--boto-profile', action='store',
+ help='Use boto profile for connections to EC2')
self.args = parser.parse_args()
@@ -338,30 +424,52 @@ class Ec2Inventory(object):
self.get_instances_by_region(region)
if self.rds_enabled:
self.get_rds_instances_by_region(region)
+ if self.elasticache_enabled:
+ self.get_elasticache_clusters_by_region(region)
+ self.get_elasticache_replication_groups_by_region(region)
self.write_to_cache(self.inventory, self.cache_path_cache)
self.write_to_cache(self.index, self.cache_path_index)
+ def connect(self, region):
+ ''' create connection to api server'''
+ if self.eucalyptus:
+ conn = boto.connect_euca(host=self.eucalyptus_host)
+ conn.APIVersion = '2010-08-31'
+ else:
+ conn = self.connect_to_aws(ec2, region)
+ return conn
+
+ def boto_fix_security_token_in_profile(self, connect_args):
+ ''' monkey patch for boto issue boto/boto#2100 '''
+ profile = 'profile ' + self.boto_profile
+ if boto.config.has_option(profile, 'aws_security_token'):
+ connect_args['security_token'] = boto.config.get(profile, 'aws_security_token')
+ return connect_args
+
+ def connect_to_aws(self, module, region):
+ connect_args = {}
+
+ # only pass the profile name if it's set (as it is not supported by older boto versions)
+ if self.boto_profile:
+ connect_args['profile_name'] = self.boto_profile
+ self.boto_fix_security_token_in_profile(connect_args)
+
+ conn = module.connect_to_region(region, **connect_args)
+ # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported
+ if conn is None:
+ self.fail_with_error("region name: %s likely not supported, or AWS is down. connection to region failed." % region)
+ return conn
def get_instances_by_region(self, region):
''' Makes an AWS EC2 API call to the list of instances in a particular
region '''
try:
- if self.eucalyptus:
- conn = boto.connect_euca(host=self.eucalyptus_host)
- conn.APIVersion = '2010-08-31'
- else:
- conn = ec2.connect_to_region(region)
-
- # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported
- if conn is None:
- print("region name: %s likely not supported, or AWS is down. connection to region failed." % region)
- sys.exit(1)
-
+ conn = self.connect(region)
reservations = []
if self.ec2_instance_filters:
- for filter_key, filter_values in self.ec2_instance_filters.iteritems():
+ for filter_key, filter_values in self.ec2_instance_filters.items():
reservations.extend(conn.get_all_instances(filters = { filter_key : filter_values }))
else:
reservations = conn.get_all_instances()
@@ -370,40 +478,130 @@ class Ec2Inventory(object):
for instance in reservation.instances:
self.add_instance(instance, region)
- except boto.exception.BotoServerError, e:
- if not self.eucalyptus:
- print "Looks like AWS is down again:"
- print e
- sys.exit(1)
+ except boto.exception.BotoServerError as e:
+ if e.error_code == 'AuthFailure':
+ error = self.get_auth_error_message()
+ else:
+ backend = 'Eucalyptus' if self.eucalyptus else 'AWS'
+ error = "Error connecting to %s backend.\n%s" % (backend, e.message)
+ self.fail_with_error(error, 'getting EC2 instances')
def get_rds_instances_by_region(self, region):
''' Makes an AWS API call to the list of RDS instances in a particular
region '''
try:
- conn = rds.connect_to_region(region)
+ conn = self.connect_to_aws(rds, region)
if conn:
instances = conn.get_all_dbinstances()
for instance in instances:
self.add_rds_instance(instance, region)
- except boto.exception.BotoServerError, e:
+ except boto.exception.BotoServerError as e:
+ error = e.reason
+
+ if e.error_code == 'AuthFailure':
+ error = self.get_auth_error_message()
if not e.reason == "Forbidden":
- print "Looks like AWS RDS is down: "
- print e
- sys.exit(1)
+ error = "Looks like AWS RDS is down:\n%s" % e.message
+ self.fail_with_error(error, 'getting RDS instances')
- def get_instance(self, region, instance_id):
- ''' Gets details about a specific instance '''
- if self.eucalyptus:
- conn = boto.connect_euca(self.eucalyptus_host)
- conn.APIVersion = '2010-08-31'
+ def get_elasticache_clusters_by_region(self, region):
+ ''' Makes an AWS API call to the list of ElastiCache clusters (with
+ nodes' info) in a particular region.'''
+
+        # The ElastiCache boto module doesn't provide a get_all_instances method,
+ # that's why we need to call describe directly (it would be called by
+ # the shorthand method anyway...)
+ try:
+ conn = elasticache.connect_to_region(region)
+ if conn:
+ # show_cache_node_info = True
+ # because we also want nodes' information
+ response = conn.describe_cache_clusters(None, None, None, True)
+
+ except boto.exception.BotoServerError as e:
+ error = e.reason
+
+ if e.error_code == 'AuthFailure':
+ error = self.get_auth_error_message()
+ if not e.reason == "Forbidden":
+ error = "Looks like AWS ElastiCache is down:\n%s" % e.message
+ self.fail_with_error(error, 'getting ElastiCache clusters')
+
+ try:
+            # Boto also doesn't provide wrapper classes for CacheClusters or
+            # CacheNodes. Because of that we can't make use of the get_list
+ # method in the AWSQueryConnection. Let's do the work manually
+ clusters = response['DescribeCacheClustersResponse']['DescribeCacheClustersResult']['CacheClusters']
+
+ except KeyError as e:
+ error = "ElastiCache query to AWS failed (unexpected format)."
+ self.fail_with_error(error, 'getting ElastiCache clusters')
+
+ for cluster in clusters:
+ self.add_elasticache_cluster(cluster, region)
+
+ def get_elasticache_replication_groups_by_region(self, region):
+ ''' Makes an AWS API call to the list of ElastiCache replication groups
+ in a particular region.'''
+
+        # The ElastiCache boto module doesn't provide a get_all_instances method,
+ # that's why we need to call describe directly (it would be called by
+ # the shorthand method anyway...)
+ try:
+ conn = elasticache.connect_to_region(region)
+ if conn:
+ response = conn.describe_replication_groups()
+
+ except boto.exception.BotoServerError as e:
+ error = e.reason
+
+ if e.error_code == 'AuthFailure':
+ error = self.get_auth_error_message()
+ if not e.reason == "Forbidden":
+ error = "Looks like AWS ElastiCache [Replication Groups] is down:\n%s" % e.message
+ self.fail_with_error(error, 'getting ElastiCache clusters')
+
+ try:
+            # Boto also doesn't provide wrapper classes for ReplicationGroups.
+            # Because of that we can't make use of the get_list method in the
+ # AWSQueryConnection. Let's do the work manually
+ replication_groups = response['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult']['ReplicationGroups']
+
+ except KeyError as e:
+ error = "ElastiCache [Replication Groups] query to AWS failed (unexpected format)."
+ self.fail_with_error(error, 'getting ElastiCache clusters')
+
+ for replication_group in replication_groups:
+ self.add_elasticache_replication_group(replication_group, region)
+
+ def get_auth_error_message(self):
+ ''' create an informative error message if there is an issue authenticating'''
+ errors = ["Authentication error retrieving ec2 inventory."]
+ if None in [os.environ.get('AWS_ACCESS_KEY_ID'), os.environ.get('AWS_SECRET_ACCESS_KEY')]:
+ errors.append(' - No AWS_ACCESS_KEY_ID or AWS_SECRET_ACCESS_KEY environment vars found')
else:
- conn = ec2.connect_to_region(region)
+ errors.append(' - AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment vars found but may not be correct')
- # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported
- if conn is None:
- print("region name: %s likely not supported, or AWS is down. connection to region failed." % region)
- sys.exit(1)
+ boto_paths = ['/etc/boto.cfg', '~/.boto', '~/.aws/credentials']
+ boto_config_found = list(p for p in boto_paths if os.path.isfile(os.path.expanduser(p)))
+ if len(boto_config_found) > 0:
+ errors.append(" - Boto configs found at '%s', but the credentials contained may not be correct" % ', '.join(boto_config_found))
+ else:
+ errors.append(" - No Boto config found at any expected location '%s'" % ', '.join(boto_paths))
+
+ return '\n'.join(errors)
+
+ def fail_with_error(self, err_msg, err_operation=None):
+ '''log an error to std err for ansible-playbook to consume and exit'''
+ if err_operation:
+ err_msg = 'ERROR: "{err_msg}", while: {err_operation}'.format(
+ err_msg=err_msg, err_operation=err_operation)
+ sys.stderr.write(err_msg)
+ sys.exit(1)
+
+ def get_instance(self, region, instance_id):
+ conn = self.connect(region)
reservations = conn.get_all_instances([instance_id])
for reservation in reservations:
@@ -414,8 +612,8 @@ class Ec2Inventory(object):
''' Adds an instance to the inventory and index, as long as it is
addressable '''
- # Only want running instances unless all_instances is True
- if not self.all_instances and instance.state != 'running':
+ # Only return instances with desired instance states
+ if instance.state not in self.ec2_instance_states:
return
# Select the best destination address
@@ -502,18 +700,21 @@ class Ec2Inventory(object):
if self.nested_groups:
self.push_group(self.inventory, 'security_groups', key)
except AttributeError:
- print 'Package boto seems a bit older.'
- print 'Please upgrade boto >= 2.3.0.'
- sys.exit(1)
+ self.fail_with_error('\n'.join(['Package boto seems a bit older.',
+ 'Please upgrade boto >= 2.3.0.']))
# Inventory: Group by tag keys
if self.group_by_tag_keys:
- for k, v in instance.tags.iteritems():
- key = self.to_safe("tag_" + k + "=" + v)
+ for k, v in instance.tags.items():
+ if v:
+ key = self.to_safe("tag_" + k + "=" + v)
+ else:
+ key = self.to_safe("tag_" + k)
self.push(self.inventory, key, dest)
if self.nested_groups:
self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k))
- self.push_group(self.inventory, self.to_safe("tag_" + k), key)
+ if v:
+ self.push_group(self.inventory, self.to_safe("tag_" + k), key)
# Inventory: Group by Route53 domain names if enabled
if self.route53_enabled and self.group_by_route53_names:
@@ -597,9 +798,9 @@ class Ec2Inventory(object):
self.push_group(self.inventory, 'security_groups', key)
except AttributeError:
- print 'Package boto seems a bit older.'
- print 'Please upgrade boto >= 2.3.0.'
- sys.exit(1)
+ self.fail_with_error('\n'.join(['Package boto seems a bit older.',
+ 'Please upgrade boto >= 2.3.0.']))
+
# Inventory: Group by engine
if self.group_by_rds_engine:
@@ -618,6 +819,243 @@ class Ec2Inventory(object):
self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance)
+ def add_elasticache_cluster(self, cluster, region):
+ ''' Adds an ElastiCache cluster to the inventory and index, as long as
+            its nodes are addressable '''
+
+ # Only want available clusters unless all_elasticache_clusters is True
+ if not self.all_elasticache_clusters and cluster['CacheClusterStatus'] != 'available':
+ return
+
+ # Select the best destination address
+ if 'ConfigurationEndpoint' in cluster and cluster['ConfigurationEndpoint']:
+ # Memcached cluster
+ dest = cluster['ConfigurationEndpoint']['Address']
+ is_redis = False
+ else:
+            # Redis single node cluster
+ # Because all Redis clusters are single nodes, we'll merge the
+ # info from the cluster with info about the node
+ dest = cluster['CacheNodes'][0]['Endpoint']['Address']
+ is_redis = True
+
+ if not dest:
+ # Skip clusters we cannot address (e.g. private VPC subnet)
+ return
+
+ # Add to index
+ self.index[dest] = [region, cluster['CacheClusterId']]
+
+ # Inventory: Group by instance ID (always a group of 1)
+ if self.group_by_instance_id:
+ self.inventory[cluster['CacheClusterId']] = [dest]
+ if self.nested_groups:
+ self.push_group(self.inventory, 'instances', cluster['CacheClusterId'])
+
+ # Inventory: Group by region
+ if self.group_by_region and not is_redis:
+ self.push(self.inventory, region, dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'regions', region)
+
+ # Inventory: Group by availability zone
+ if self.group_by_availability_zone and not is_redis:
+ self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest)
+ if self.nested_groups:
+ if self.group_by_region:
+ self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone'])
+ self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone'])
+
+ # Inventory: Group by node type
+ if self.group_by_instance_type and not is_redis:
+ type_name = self.to_safe('type_' + cluster['CacheNodeType'])
+ self.push(self.inventory, type_name, dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'types', type_name)
+
+ # Inventory: Group by VPC (information not available in the current
+ # AWS API version for ElastiCache)
+
+ # Inventory: Group by security group
+ if self.group_by_security_group and not is_redis:
+
+ # Check for the existence of the 'SecurityGroups' key and also if
+ # this key has some value. When the cluster is not placed in a SG
+ # the query can return None here and cause an error.
+ if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None:
+ for security_group in cluster['SecurityGroups']:
+ key = self.to_safe("security_group_" + security_group['SecurityGroupId'])
+ self.push(self.inventory, key, dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'security_groups', key)
+
+ # Inventory: Group by engine
+ if self.group_by_elasticache_engine and not is_redis:
+ self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'elasticache_engines', self.to_safe(cluster['Engine']))
+
+ # Inventory: Group by parameter group
+ if self.group_by_elasticache_parameter_group:
+ self.push(self.inventory, self.to_safe("elasticache_parameter_group_" + cluster['CacheParameterGroup']['CacheParameterGroupName']), dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'elasticache_parameter_groups', self.to_safe(cluster['CacheParameterGroup']['CacheParameterGroupName']))
+
+ # Inventory: Group by replication group
+ if self.group_by_elasticache_replication_group and 'ReplicationGroupId' in cluster and cluster['ReplicationGroupId']:
+ self.push(self.inventory, self.to_safe("elasticache_replication_group_" + cluster['ReplicationGroupId']), dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'elasticache_replication_groups', self.to_safe(cluster['ReplicationGroupId']))
+
+ # Global Tag: all ElastiCache clusters
+ self.push(self.inventory, 'elasticache_clusters', cluster['CacheClusterId'])
+
+ host_info = self.get_host_info_dict_from_describe_dict(cluster)
+
+ self.inventory["_meta"]["hostvars"][dest] = host_info
+
+ # Add the nodes
+ for node in cluster['CacheNodes']:
+ self.add_elasticache_node(node, cluster, region)
+
+ def add_elasticache_node(self, node, cluster, region):
+ ''' Adds an ElastiCache node to the inventory and index, as long as
+ it is addressable '''
+
+ # Only want available nodes unless all_elasticache_nodes is True
+ if not self.all_elasticache_nodes and node['CacheNodeStatus'] != 'available':
+ return
+
+ # Select the best destination address
+ dest = node['Endpoint']['Address']
+
+ if not dest:
+ # Skip nodes we cannot address (e.g. private VPC subnet)
+ return
+
+ node_id = self.to_safe(cluster['CacheClusterId'] + '_' + node['CacheNodeId'])
+
+ # Add to index
+ self.index[dest] = [region, node_id]
+
+ # Inventory: Group by node ID (always a group of 1)
+ if self.group_by_instance_id:
+ self.inventory[node_id] = [dest]
+ if self.nested_groups:
+ self.push_group(self.inventory, 'instances', node_id)
+
+ # Inventory: Group by region
+ if self.group_by_region:
+ self.push(self.inventory, region, dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'regions', region)
+
+ # Inventory: Group by availability zone
+ if self.group_by_availability_zone:
+ self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest)
+ if self.nested_groups:
+ if self.group_by_region:
+ self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone'])
+ self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone'])
+
+ # Inventory: Group by node type
+ if self.group_by_instance_type:
+ type_name = self.to_safe('type_' + cluster['CacheNodeType'])
+ self.push(self.inventory, type_name, dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'types', type_name)
+
+ # Inventory: Group by VPC (information not available in the current
+ # AWS API version for ElastiCache)
+
+ # Inventory: Group by security group
+ if self.group_by_security_group:
+
+ # Check for the existence of the 'SecurityGroups' key and also if
+ # this key has some value. When the cluster is not placed in a SG
+ # the query can return None here and cause an error.
+ if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None:
+ for security_group in cluster['SecurityGroups']:
+ key = self.to_safe("security_group_" + security_group['SecurityGroupId'])
+ self.push(self.inventory, key, dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'security_groups', key)
+
+ # Inventory: Group by engine
+ if self.group_by_elasticache_engine:
+ self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'elasticache_engines', self.to_safe("elasticache_" + cluster['Engine']))
+
+ # Inventory: Group by parameter group (done at cluster level)
+
+ # Inventory: Group by replication group (done at cluster level)
+
+ # Inventory: Group by ElastiCache Cluster
+ if self.group_by_elasticache_cluster:
+ self.push(self.inventory, self.to_safe("elasticache_cluster_" + cluster['CacheClusterId']), dest)
+
+ # Global Tag: all ElastiCache nodes
+ self.push(self.inventory, 'elasticache_nodes', dest)
+
+ host_info = self.get_host_info_dict_from_describe_dict(node)
+
+ if dest in self.inventory["_meta"]["hostvars"]:
+ self.inventory["_meta"]["hostvars"][dest].update(host_info)
+ else:
+ self.inventory["_meta"]["hostvars"][dest] = host_info
+
+ def add_elasticache_replication_group(self, replication_group, region):
+ ''' Adds an ElastiCache replication group to the inventory and index '''
+
+ # Only want available clusters unless all_elasticache_replication_groups is True
+ if not self.all_elasticache_replication_groups and replication_group['Status'] != 'available':
+ return
+
+ # Select the best destination address (PrimaryEndpoint)
+ dest = replication_group['NodeGroups'][0]['PrimaryEndpoint']['Address']
+
+ if not dest:
+ # Skip clusters we cannot address (e.g. private VPC subnet)
+ return
+
+ # Add to index
+ self.index[dest] = [region, replication_group['ReplicationGroupId']]
+
+ # Inventory: Group by ID (always a group of 1)
+ if self.group_by_instance_id:
+ self.inventory[replication_group['ReplicationGroupId']] = [dest]
+ if self.nested_groups:
+ self.push_group(self.inventory, 'instances', replication_group['ReplicationGroupId'])
+
+ # Inventory: Group by region
+ if self.group_by_region:
+ self.push(self.inventory, region, dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'regions', region)
+
+ # Inventory: Group by availability zone (doesn't apply to replication groups)
+
+ # Inventory: Group by node type (doesn't apply to replication groups)
+
+ # Inventory: Group by VPC (information not available in the current
+        # AWS API version for replication groups)
+
+ # Inventory: Group by security group (doesn't apply to replication groups)
+        # Check this value at the cluster level
+
+ # Inventory: Group by engine (replication groups are always Redis)
+ if self.group_by_elasticache_engine:
+ self.push(self.inventory, 'elasticache_redis', dest)
+ if self.nested_groups:
+ self.push_group(self.inventory, 'elasticache_engines', 'redis')
+
+ # Global Tag: all ElastiCache clusters
+ self.push(self.inventory, 'elasticache_replication_groups', replication_group['ReplicationGroupId'])
+
+ host_info = self.get_host_info_dict_from_describe_dict(replication_group)
+
+ self.inventory["_meta"]["hostvars"][dest] = host_info
def get_route53_records(self):
''' Get and store the map of resource records to domain names that
@@ -666,7 +1104,6 @@ class Ec2Inventory(object):
return list(name_list)
-
def get_host_info_dict_from_instance(self, instance):
instance_vars = {}
for key in vars(instance):
@@ -683,7 +1120,7 @@ class Ec2Inventory(object):
instance_vars['ec2_previous_state_code'] = instance.previous_state_code
elif type(value) in [int, bool]:
instance_vars[key] = value
- elif type(value) in [str, unicode]:
+ elif isinstance(value, six.string_types):
instance_vars[key] = value.strip()
elif type(value) == type(None):
instance_vars[key] = ''
@@ -692,7 +1129,7 @@ class Ec2Inventory(object):
elif key == 'ec2__placement':
instance_vars['ec2_placement'] = value.zone
elif key == 'ec2_tags':
- for k, v in value.iteritems():
+ for k, v in value.items():
key = self.to_safe('ec2_tag_' + k)
instance_vars[key] = v
elif key == 'ec2_groups':
@@ -712,6 +1149,91 @@ class Ec2Inventory(object):
return instance_vars
+ def get_host_info_dict_from_describe_dict(self, describe_dict):
+        ''' Parses the dictionary returned by the API call into a flat dictionary
+ of parameters. This method should be used only when 'describe' is
+ used directly because Boto doesn't provide specific classes. '''
+
+ # I really don't agree with prefixing everything with 'ec2'
+ # because EC2, RDS and ElastiCache are different services.
+ # I'm just following the pattern used until now to not break any
+ # compatibility.
+
+ host_info = {}
+ for key in describe_dict:
+ value = describe_dict[key]
+ key = self.to_safe('ec2_' + self.uncammelize(key))
+
+ # Handle complex types
+
+ # Target: Memcached Cache Clusters
+ if key == 'ec2_configuration_endpoint' and value:
+ host_info['ec2_configuration_endpoint_address'] = value['Address']
+ host_info['ec2_configuration_endpoint_port'] = value['Port']
+
+ # Target: Cache Nodes and Redis Cache Clusters (single node)
+ if key == 'ec2_endpoint' and value:
+ host_info['ec2_endpoint_address'] = value['Address']
+ host_info['ec2_endpoint_port'] = value['Port']
+
+ # Target: Redis Replication Groups
+ if key == 'ec2_node_groups' and value:
+ host_info['ec2_endpoint_address'] = value[0]['PrimaryEndpoint']['Address']
+ host_info['ec2_endpoint_port'] = value[0]['PrimaryEndpoint']['Port']
+ replica_count = 0
+ for node in value[0]['NodeGroupMembers']:
+ if node['CurrentRole'] == 'primary':
+ host_info['ec2_primary_cluster_address'] = node['ReadEndpoint']['Address']
+ host_info['ec2_primary_cluster_port'] = node['ReadEndpoint']['Port']
+ host_info['ec2_primary_cluster_id'] = node['CacheClusterId']
+ elif node['CurrentRole'] == 'replica':
+ host_info['ec2_replica_cluster_address_'+ str(replica_count)] = node['ReadEndpoint']['Address']
+ host_info['ec2_replica_cluster_port_'+ str(replica_count)] = node['ReadEndpoint']['Port']
+ host_info['ec2_replica_cluster_id_'+ str(replica_count)] = node['CacheClusterId']
+ replica_count += 1
+
+ # Target: Redis Replication Groups
+ if key == 'ec2_member_clusters' and value:
+ host_info['ec2_member_clusters'] = ','.join([str(i) for i in value])
+
+ # Target: All Cache Clusters
+ elif key == 'ec2_cache_parameter_group':
+ host_info["ec2_cache_node_ids_to_reboot"] = ','.join([str(i) for i in value['CacheNodeIdsToReboot']])
+ host_info['ec2_cache_parameter_group_name'] = value['CacheParameterGroupName']
+ host_info['ec2_cache_parameter_apply_status'] = value['ParameterApplyStatus']
+
+ # Target: Almost everything
+ elif key == 'ec2_security_groups':
+
+ # Skip if SecurityGroups is None
+ # (it is possible to have the key defined but no value in it).
+ if value is not None:
+ sg_ids = []
+ for sg in value:
+ sg_ids.append(sg['SecurityGroupId'])
+ host_info["ec2_security_group_ids"] = ','.join([str(i) for i in sg_ids])
+
+ # Target: Everything
+ # Preserve booleans and integers
+ elif type(value) in [int, bool]:
+ host_info[key] = value
+
+ # Target: Everything
+ # Sanitize string values
+ elif isinstance(value, six.string_types):
+ host_info[key] = value.strip()
+
+ # Target: Everything
+ # Replace None by an empty string
+ elif type(value) == type(None):
+ host_info[key] = ''
+
+ else:
+ # Remove non-processed complex types
+ pass
+
+ return host_info
+
def get_host_info(self):
''' Get variables about a specific host '''
@@ -775,13 +1297,16 @@ class Ec2Inventory(object):
cache.write(json_data)
cache.close()
+    def uncammelize(self, key):
+        ''' Convert a CamelCase key to snake_case '''
+        temp = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', key)
+        return re.sub('([a-z0-9])([A-Z])', r'\1_\2', temp).lower()
def to_safe(self, word):
- ''' Converts 'bad' characters in a string to underscores so they can be
- used as Ansible groups '''
-
- return re.sub("[^A-Za-z0-9\-]", "_", word)
-
+ ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups '''
+ regex = "[^A-Za-z0-9\_"
+ if not self.replace_dash_in_groups:
+ regex += "\-"
+ return re.sub(regex + "]", "_", word)
def json_format_dict(self, data, pretty=False):
''' Converts a dict to a JSON object and dumps it as a formatted
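To make the new key handling concrete: get_host_info_dict_from_describe_dict prefixes each describe key with 'ec2_' and passes it through uncammelize and to_safe, so a raw ElastiCache key such as CacheClusterId becomes the hostvar ec2_cache_cluster_id. A standalone sketch using the same regexes as the patch above; the sample keys are taken from the ElastiCache code added here:

    import re

    def uncammelize(key):
        # same regexes as the uncammelize method added above
        temp = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', key)
        return re.sub('([a-z0-9])([A-Z])', r'\1_\2', temp).lower()

    def to_safe(word, replace_dash_in_groups=True):
        # same character class as the patched to_safe
        regex = "[^A-Za-z0-9\_"
        if not replace_dash_in_groups:
            regex += "\-"
        return re.sub(regex + "]", "_", word)

    for raw in ['CacheClusterId', 'CacheNodeType', 'PreferredAvailabilityZone']:
        print(to_safe('ec2_' + uncammelize(raw)))
    # -> ec2_cache_cluster_id, ec2_cache_node_type, ec2_preferred_availability_zone
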
diff --git a/inventory/byo/hosts.example b/inventory/byo/hosts.example
index 56bbb9612..799725a37 100644
--- a/inventory/byo/hosts.example
+++ b/inventory/byo/hosts.example
@@ -36,6 +36,10 @@ deployment_type=atomic-enterprise
# Origin copr repo
#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, gpgkey: 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]
+# Origin Fedora copr repo
+# Use this if you are installing on Fedora
+#openshift_additional_repos=[{'id': 'fedora-openshift-origin-copr', 'name': 'OpenShift Origin COPR for Fedora', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/fedora-openshift/fedora-$releasever-$basearch/', 'enabled': 1, 'gpgcheck': 1, gpgkey: 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/fedora-openshift/pubkey.gpg'}]
+
# htpasswd auth
openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/openshift/htpasswd'}]
@@ -111,8 +115,17 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# set RPM version for debugging purposes
#openshift_pkg_version=-3.0.0.0
-# Configure custom master certificates
+# Configure custom named certificates
+# NOTE: openshift_master_named_certificates is cached on masters and is an
+# additive fact, meaning that each run with a different set of certificates
+# will add the newly provided certificates to the cached set of certificates.
+# If you would like openshift_master_named_certificates to be overwritten with
+# the provided value, specify openshift_master_overwrite_named_certificates.
+#openshift_master_overwrite_named_certificates=true
+#
+# Provide local certificate paths which will be deployed to masters
#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key"}]
+#
# Detected names may be overridden by specifying the "names" key
#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "names": ["public-master-host.com"]}]
@@ -135,6 +148,19 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# configure how often node iptables rules are refreshed
#openshift_node_iptables_sync_period=5s
+# Configure nodeIP in the node config
+# This is needed in cases where node traffic is desired to go over an
+# interface other than the default network interface.
+#openshift_node_set_node_ip=True
+
+# Force setting of system hostname when configuring OpenShift
+# This works around issues related to installations that do not have valid dns
+# entries for the interfaces attached to the host.
+#openshift_set_hostname=True
+
+# Configure dnsIP in the node config
+#openshift_dns_ip=172.30.0.1
+
# host group for masters
[masters]
ose3-master[1:3]-ansible.test.example.com
@@ -147,7 +173,7 @@ ose3-lb-ansible.test.example.com
# NOTE: Currently we require that masters be part of the SDN which requires that they also be nodes
# However, in order to ensure that your masters are not burdened with running pods you should
-# make them unschedulable by adding openshift_scheduleable=False any node that's also a master.
+# make them unschedulable by adding openshift_schedulable=False any node that's also a master.
[nodes]
ose3-master[1:3]-ansible.test.example.com
ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}"
diff --git a/inventory/multi_inventory.py b/inventory/multi_inventory.py
index 354a8c10c..232f2402d 100755
--- a/inventory/multi_inventory.py
+++ b/inventory/multi_inventory.py
@@ -288,26 +288,30 @@ class MultiInventory(object):
results = self.all_inventory_results[acc_config['name']]
results['all_hosts'] = results['_meta']['hostvars'].keys()
- # Update each hostvar with the newly desired key: value from extra_*
- for _extra in ['extra_vars', 'extra_groups']:
- for new_var, value in acc_config.get(_extra, {}).items():
- for data in results['_meta']['hostvars'].values():
- self.add_entry(data, new_var, value)
-
- # Add this group
- if _extra == 'extra_groups':
- results["%s_%s" % (new_var, value)] = copy.copy(results['all_hosts'])
-
- # Clone groups goes here
- for to_name, from_name in acc_config.get('clone_groups', {}).items():
- if results.has_key(from_name):
- results[to_name] = copy.copy(results[from_name])
+ # Extra vars go here
+ for new_var, value in acc_config.get('extra_vars', {}).items():
+ for data in results['_meta']['hostvars'].values():
+ self.add_entry(data, new_var, value)
- # Clone vars goes here
+ # Clone vars go here
for to_name, from_name in acc_config.get('clone_vars', {}).items():
for data in results['_meta']['hostvars'].values():
self.add_entry(data, to_name, self.get_entry(data, from_name))
+ # Extra groups go here
+ for new_var, value in acc_config.get('extra_groups', {}).items():
+ for data in results['_meta']['hostvars'].values():
+ results["%s_%s" % (new_var, value)] = copy.copy(results['all_hosts'])
+
+ # Clone groups go here
+ # Build a group based on the desired key name
+ for to_name, from_name in acc_config.get('clone_groups', {}).items():
+ for name, data in results['_meta']['hostvars'].items():
+ key = '%s_%s' % (to_name, self.get_entry(data, from_name))
+                if key not in results:
+ results[key] = []
+ results[key].append(name)
+
# store the results back into all_inventory_results
self.all_inventory_results[acc_config['name']] = results
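To illustrate the reworked clone_groups behaviour above: each (to_name, from_name) pair now builds one group per distinct value of from_name across the hostvars, rather than copying an existing group wholesale. A simplified, self-contained sketch; the host names and the oo_environment variable are made up, and get_entry is reduced to a plain dict lookup:

    def clone_groups(results, clone_cfg):
        # simplified version of the loop added above; the real code uses
        # self.get_entry(), which supports nested key lookups
        for to_name, from_name in clone_cfg.items():
            for name, data in results['_meta']['hostvars'].items():
                key = '%s_%s' % (to_name, data.get(from_name))
                results.setdefault(key, []).append(name)
        return results

    results = {'_meta': {'hostvars': {
        'host1': {'oo_environment': 'prod'},
        'host2': {'oo_environment': 'stg'},
    }}}
    print(clone_groups(results, {'environment': 'oo_environment'}))
    # adds 'environment_prod': ['host1'] and 'environment_stg': ['host2']
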
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 21f624400..09569761f 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -5,7 +5,7 @@
}
Name: openshift-ansible
-Version: 3.0.12
+Version: 3.0.16
Release: 1%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
@@ -13,7 +13,8 @@ URL: https://github.com/openshift/openshift-ansible
Source0: https://github.com/openshift/openshift-ansible/archive/%{commit}/%{name}-%{version}.tar.gz
BuildArch: noarch
-Requires: ansible
+Requires: ansible >= 1.9.3
+Requires: python2
%description
Openshift and Atomic Enterprise Ansible
@@ -96,8 +97,9 @@ popd
# ----------------------------------------------------------------------------------
%package bin
Summary: Openshift and Atomic Enterprise Ansible Scripts for working with metadata hosts
-Requires: %{name}-inventory
-Requires: python2
+Requires: %{name} = %{version}
+Requires: %{name}-inventory = %{version}
+Requires: %{name}-playbooks = %{version}
BuildRequires: python2-devel
BuildArch: noarch
@@ -117,7 +119,7 @@ Scripts to make it nicer when working with hosts that are defined only by metada
# ----------------------------------------------------------------------------------
%package docs
Summary: Openshift and Atomic Enterprise Ansible documents
-Requires: %{name}
+Requires: %{name} = %{version}
BuildArch: noarch
%description docs
@@ -131,7 +133,7 @@ BuildArch: noarch
# ----------------------------------------------------------------------------------
%package inventory
Summary: Openshift and Atomic Enterprise Ansible Inventories
-Requires: python2
+Requires: %{name} = %{version}
BuildArch: noarch
%description inventory
@@ -144,7 +146,7 @@ Ansible Inventories used with the openshift-ansible scripts and playbooks.
%package inventory-aws
Summary: Openshift and Atomic Enterprise Ansible Inventories for AWS
-Requires: %{name}-inventory
+Requires: %{name}-inventory = %{version}
Requires: python-boto
BuildArch: noarch
@@ -156,7 +158,7 @@ Ansible Inventories for AWS used with the openshift-ansible scripts and playbook
%package inventory-gce
Summary: Openshift and Atomic Enterprise Ansible Inventories for GCE
-Requires: %{name}-inventory
+Requires: %{name}-inventory = %{version}
Requires: python-libcloud >= 0.13
BuildArch: noarch
@@ -172,10 +174,10 @@ Ansible Inventories for GCE used with the openshift-ansible scripts and playbook
# ----------------------------------------------------------------------------------
%package playbooks
Summary: Openshift and Atomic Enterprise Ansible Playbooks
-Requires: %{name}
-Requires: %{name}-roles
-Requires: %{name}-lookup-plugins
-Requires: %{name}-filter-plugins
+Requires: %{name} = %{version}
+Requires: %{name}-roles = %{version}
+Requires: %{name}-lookup-plugins = %{version}
+Requires: %{name}-filter-plugins = %{version}
BuildArch: noarch
%description playbooks
@@ -191,8 +193,8 @@ BuildArch: noarch
%package roles
Summary: Openshift and Atomic Enterprise Ansible roles
Requires: %{name}
-Requires: %{name}-lookup-plugins
-Requires: %{name}-filter-plugins
+Requires: %{name}-lookup-plugins = %{version}
+Requires: %{name}-filter-plugins = %{version}
BuildArch: noarch
%description roles
@@ -238,9 +240,7 @@ BuildArch: noarch
%package -n atomic-openshift-utils
Summary: Atomic OpenShift Utilities
BuildRequires: python-setuptools
-Requires: openshift-ansible-playbooks
-Requires: openshift-ansible-roles
-Requires: ansible
+Requires: %{name}-playbooks >= %{version}
Requires: python-click
Requires: python-setuptools
Requires: PyYAML
@@ -258,6 +258,129 @@ Atomic OpenShift Utilities includes
%changelog
+* Tue Nov 24 2015 Brenton Leanhardt <bleanhar@redhat.com> 3.0.16-1
+- Silencing pylint branch errors for now for the atomic-openshift-installer
+ harness (bleanhar@redhat.com)
+- Properly setting scheduleability for HA Master scenarios
+ (bleanhar@redhat.com)
+- added graphs (mwoodson@redhat.com)
+- Rework setting of hostname (jdetiber@redhat.com)
+- Fixed a bug in the actions. It now supports changing opconditions
+ (kwoodson@redhat.com)
+- Conditionally set the nodeIP (jdetiber@redhat.com)
+- Bug 1284991 - "atomic-openshift-installer uninstall" error when configuration
+ file is missing. (bleanhar@redhat.com)
+- Avoid printing the master and node totals in the add-a-node scenario
+ (bleanhar@redhat.com)
+- Fixing tests for quick_ha (bleanhar@redhat.com)
+- Removing a debug line (bleanhar@redhat.com)
+- atomic-openshift-installer: Fix lint issue (smunilla@redhat.com)
+- Handling preconfigured load balancers (bleanhar@redhat.com)
+- atomic-openshift-installer: Rename ha_proxy (smunilla@redhat.com)
+- atomic-openshift-installer: Reverse version and host collection
+ (smunilla@redhat.com)
+- cli_installer_tests: Add test for unattended quick HA (smunilla@redhat.com)
+- Breakup inventory writing (smunilla@redhat.com)
+- Enforce 1 or 3 masters (smunilla@redhat.com)
+- Add interactive test (smunilla@redhat.com)
+- atomic-openshift-installer: HA for quick installer (smunilla@redhat.com)
+- Adding zbx_graph support (kwoodson@redhat.com)
+- Modified step params to be in order when passed as a list
+ (kwoodson@redhat.com)
+- Add serviceAccountConfig.masterCA during 3.1 upgrade (jdetiber@redhat.com)
+- Use the identity_providers from openshift_facts instead of always using the
+ inventory variable (jdetiber@redhat.com)
+- Refactor master identity provider configuration (jdetiber@redhat.com)
+
+* Fri Nov 20 2015 Kenny Woodson <kwoodson@redhat.com> 3.0.15-1
+- Fixing clone group functionality. Also separating extra_vars from
+ extra_groups (kwoodson@redhat.com)
+- Check the end result on bad config file (smunilla@redhat.com)
+- Add some tests for a bad config (smunilla@redhat.com)
+- atomic-openshift-installer: connect_to error handling (smunilla@redhat.com)
+- atomic-openshift-installer: pylint fixes (smunilla@redhat.com)
+- Replace map with oo_collect to support python-jinja2 <2.7
+ (abutcher@redhat.com)
+- Making the uninstall playbook more flexible (bleanhar@redhat.com)
+- Install version dependent image streams for v1.0 and v1.1
+ (sdodson@redhat.com)
+- Do not update the hostname (jdetiber@redhat.com)
+- Pylint fix for long line in cli docstring. (dgoodwin@redhat.com)
+- Default to installing OSE 3.1 instead of 3.0. (dgoodwin@redhat.com)
+- Fix tests on systems with openshift-ansible rpms installed.
+ (dgoodwin@redhat.com)
+
+* Thu Nov 19 2015 Brenton Leanhardt <bleanhar@redhat.com> 3.0.14-1
+- added metric items to zabbix for openshift online (mwoodson@redhat.com)
+- Updating usergroups to accept users (kwoodson@redhat.com)
+- Differentiate machine types on GCE (master and nodes)
+ (romain.dossin@amadeus.com)
+- Uninstall - Remove systemd wants file for node (jdetiber@redhat.com)
+- ec2 - force !requiretty for ssh_user (jdetiber@redhat.com)
+- small tweaks for adding docker volume for aws master hosts
+ (jdetiber@redhat.com)
+- Created role to deploy ops host monitoring (jdiaz@redhat.com)
+- Update certificate paths when 'names' key is provided. (abutcher@redhat.com)
+- add a volume on master host, in AWS provisioning (chengcheng.mu@amadeus.com)
+- First attempt at adding web scenarios (kwoodson@redhat.com)
+- Use field numbers for all formats in bin/cluster for python 2.6
+ (abutcher@redhat.com)
+- atomic-openshift-installer: Correct single master case (smunilla@redhat.com)
+- added copr-openshift-ansible releaser, removed old rel-eng stuff.
+ (twiest@redhat.com)
+- changed counter -> count (mwoodson@redhat.com)
+- Updating zbx_item classes to support data types for bool.
+ (kwoodson@redhat.com)
+- Fix ec2 instance type override (jdetiber@redhat.com)
+- updated my check to support the boolean data type (mwoodson@redhat.com)
+- Add additive_facts_to_overwrite instead of overwriting all additive_facts
+ (abutcher@redhat.com)
+- added healthz check and more pod count checks (mwoodson@redhat.com)
+- updating to the latest ec2.py (and re-patching with our changes).
+ (twiest@redhat.com)
+- atomic-openshift-installer: Temporarily restrict to single master
+ (smunilla@redhat.com)
+- openshift-ansible: Correct variable (smunilla@redhat.com)
+- Refactor named certificates. (abutcher@redhat.com)
+- atomic-openshift-utils: Version lock playbooks (smunilla@redhat.com)
+- Add the native ha services and configs to uninstall (jdetiber@redhat.com)
+- Bug 1282336 - Add additional seboolean for gluster (jdetiber@redhat.com)
+- Raise lifetime to 2 weeks for dynamic AWS items (jdiaz@redhat.com)
+- bin/cluster fix python 2.6 issue (jdetiber@redhat.com)
+- cluster list: break host types by subtype (lhuard@amadeus.com)
+- README_AWS: Add needed dependency (c.witt.1900@gmail.com)
+- Fix invalid sudo command test (takayoshi@gmail.com)
+- Docs: Fedora: Add missing dependencies and update to dnf. (public@omeid.me)
+- Gate upgrade steps for 3.0 to 3.1 upgrade (jdetiber@redhat.com)
+- added the tito and copr_cli roles (twiest@redhat.com)
+- pylint openshift_facts (jdetiber@redhat.com)
+- Update etcd default facts setting (jdetiber@redhat.com)
+- Update master facts prior to upgrading incase facts are missing.
+ (abutcher@redhat.com)
+- pre-upgrade-check: differentiates between port and targetPort in output
+ (smilner@redhat.com)
+- Better structure the output of the list playbook (lhuard@amadeus.com)
+- Add the sub-host-type tag to the libvirt VMs (lhuard@amadeus.com)
+- atomic-openshift-installer: Update nopwd sudo test (smunilla@redhat.com)
+- Fix pylint import errors for utils/test/. (dgoodwin@redhat.com)
+- atomic-openshift-installer: Update prompts and help messages
+ (smunilla@redhat.com)
+- Dependencies need to be added when a create occurs on SLA object.
+ (kwoodson@redhat.com)
+- Test additions for cli_installer:get_hosts_to_install_on
+ (bleanhar@redhat.com)
+- adding itservice (kwoodson@redhat.com)
+- remove netaddr dependency (tob@butter.sh)
+- Add pyOpenSSL to dependencies for Fedora. (public@omeid.me)
+- Vagrant RHEL registration cleanup (pep@redhat.com)
+- RH subscription: optional satellite and pkg update (pep@redhat.com)
+
+* Tue Nov 17 2015 Brenton Leanhardt <bleanhar@redhat.com> 3.0.13-1
+- The aep3 images changed locations. (bleanhar@redhat.com)
+- atomic-openshift-installer: Correct single master case (smunilla@redhat.com)
+- atomic-openshift-installer: Temporarily restrict to single master
+ (smunilla@redhat.com)
+
* Wed Nov 11 2015 Brenton Leanhardt <bleanhar@redhat.com> 3.0.12-1
- Sync with the latest image streams (sdodson@redhat.com)
diff --git a/playbooks/adhoc/bootstrap-fedora.yml b/playbooks/adhoc/bootstrap-fedora.yml
new file mode 100644
index 000000000..de9f36c8a
--- /dev/null
+++ b/playbooks/adhoc/bootstrap-fedora.yml
@@ -0,0 +1,5 @@
+- hosts: OSv3
+ gather_facts: false
+ tasks:
+ - name: install python and deps for ansible modules
+ raw: dnf install -y python2 python2-dnf libselinux-python libsemanage-python
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index e0dbad900..08a2ea6fb 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -48,7 +48,39 @@
- pcsd
- yum: name={{ item }} state=absent
- when: not is_atomic | bool
+ when: ansible_pkg_mgr == "yum" and not is_atomic | bool
+ with_items:
+ - atomic-enterprise
+ - atomic-enterprise-master
+ - atomic-enterprise-node
+ - atomic-enterprise-sdn-ovs
+ - atomic-openshift
+ - atomic-openshift-clients
+ - atomic-openshift-master
+ - atomic-openshift-node
+ - atomic-openshift-sdn-ovs
+ - corosync
+ - etcd
+ - openshift
+ - openshift-master
+ - openshift-node
+ - openshift-sdn
+ - openshift-sdn-ovs
+ - openvswitch
+ - origin
+ - origin-clients
+ - origin-master
+ - origin-node
+ - origin-sdn-ovs
+ - pacemaker
+ - pcs
+ - tuned-profiles-atomic-enterprise-node
+ - tuned-profiles-atomic-openshift-node
+ - tuned-profiles-openshift-node
+ - tuned-profiles-origin-node
+
+ - dnf: name={{ item }} state=absent
+ when: ansible_pkg_mgr == "dnf" and not is_atomic | bool
with_items:
- atomic-enterprise
- atomic-enterprise-master
@@ -103,7 +135,7 @@
- shell: find /var/lib/openshift/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
changed_when: False
- - shell: docker rm -f "{{ item }}"-master "{{ item }}"-node
+ - shell: docker rm -f "{{ item }}"-master "{{ item }}"-node
changed_when: False
failed_when: False
with_items:
@@ -111,12 +143,12 @@
- atomic-enterprise
- origin
- - shell: docker ps -a | grep Exited | grep "{{ item }}" | awk '{print $1}'
+ - shell: docker ps -a | grep Exited | egrep "{{ item }}" | awk '{print $1}'
changed_when: False
failed_when: False
register: exited_containers_to_delete
with_items:
- - aep3/aep
+ - aep3.*/aep
- openshift3/ose
- openshift/origin
@@ -125,13 +157,13 @@
failed_when: False
with_items: "{{ exited_containers_to_delete.results }}"
- - shell: docker images | grep {{ item }} | awk '{ print $3 }'
+ - shell: docker images | egrep {{ item }} | awk '{ print $3 }'
changed_when: False
failed_when: False
register: images_to_delete
with_items:
- - registry.access.redhat.com/openshift3
- - registry.access.redhat.com/aep3
+ - registry\.access\..*redhat\.com/openshift3
+ - registry\.access\..*redhat\.com/aep3
- docker.io/openshift
- shell: "docker rmi -f {{ item.stdout_lines | join(' ') }}"
@@ -152,11 +184,16 @@
- /etc/sysconfig/atomic-enterprise-master
- /etc/sysconfig/atomic-enterprise-node
- /etc/sysconfig/atomic-openshift-master
+ - /etc/sysconfig/atomic-openshift-master-api
+ - /etc/sysconfig/atomic-openshift-master-controllers
- /etc/sysconfig/atomic-openshift-node
- /etc/sysconfig/openshift-master
- /etc/sysconfig/openshift-node
- /etc/sysconfig/origin-master
+ - /etc/sysconfig/origin-master-api
+ - /etc/sysconfig/origin-master-controllers
- /etc/sysconfig/origin-node
+ - /etc/systemd/system/atomic-openshift-node.service.wants
- /root/.kube
- /run/openshift-sdn
- /usr/share/openshift/examples
@@ -165,6 +202,19 @@
- /var/lib/openshift
- /var/lib/origin
- /var/lib/pacemaker
-
+ - /usr/lib/systemd/system/atomic-openshift-master-api.service
+ - /usr/lib/systemd/system/atomic-openshift-master-controllers.service
+ - /usr/lib/systemd/system/origin-master-api.service
+ - /usr/lib/systemd/system/origin-master-controllers.service
+
+  # Since we are potentially removing the systemd unit files for the separated
+  # master-api and master-controllers services, we need to reload the
+  # systemd configuration manager
+ - name: Reload systemd manager configuration
+ command: systemctl daemon-reload
+
+- hosts: nodes
+ sudo: yes
+ tasks:
- name: restart docker
service: name=docker state=restarted
diff --git a/playbooks/aws/openshift-cluster/addNodes.yml b/playbooks/aws/openshift-cluster/addNodes.yml
new file mode 100644
index 000000000..fff3e401b
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/addNodes.yml
@@ -0,0 +1,39 @@
+---
+- name: Launch instance(s)
+ hosts: localhost
+ connection: local
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ - ["vars.{{ deployment_type }}.{{ cluster_id }}.yml", vars.defaults.yml]
+ vars:
+ oo_extend_env: True
+ tasks:
+ - fail:
+ msg: Deployment type not supported for aws provider yet
+ when: deployment_type == 'enterprise'
+
+ - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
+ vars:
+ type: "compute"
+ count: "{{ num_nodes }}"
+ - include: tasks/launch_instances.yml
+ vars:
+ instances: "{{ node_names }}"
+ cluster: "{{ cluster_id }}"
+ type: "{{ k8s_type }}"
+ g_sub_host_type: "{{ sub_host_type }}"
+
+ - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
+ vars:
+ type: "infra"
+ count: "{{ num_infra }}"
+ - include: tasks/launch_instances.yml
+ vars:
+ instances: "{{ node_names }}"
+ cluster: "{{ cluster_id }}"
+ type: "{{ k8s_type }}"
+ g_sub_host_type: "{{ sub_host_type }}"
+
+- include: scaleup.yml
+- include: list.yml
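
A hypothetical invocation of this new entry point, following the usage-comment style of the AWS upgrade playbook further below, could look like the sketch here; the extra-vars are assumptions inferred from the variables the play consumes (cluster_id, deployment_type, num_nodes, num_infra):

    # Usage (sketch):
    # ansible-playbook playbooks/aws/openshift-cluster/addNodes.yml \
    #   -e cluster_id=<cluster_id> -e deployment_type=origin \
    #   -e num_nodes=1 -e num_infra=0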
diff --git a/playbooks/aws/openshift-cluster/scaleup.yml b/playbooks/aws/openshift-cluster/scaleup.yml
new file mode 100644
index 000000000..4415700a3
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/scaleup.yml
@@ -0,0 +1,34 @@
+---
+
+- hosts: localhost
+ gather_facts: no
+ vars_files:
+ - vars.yml
+ tasks:
+ - set_fact:
+ g_ssh_user_tmp: "{{ deployment_vars[deployment_type].ssh_user }}"
+ g_sudo_tmp: "{{ deployment_vars[deployment_type].sudo }}"
+ - name: Evaluate oo_hosts_to_update
+ add_host:
+ name: "{{ item }}"
+ groups: oo_hosts_to_update
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ with_items: "{{ groups.nodes_to_add }}"
+
+- include: ../../common/openshift-cluster/update_repos_and_packages.yml
+
+- include: ../../common/openshift-cluster/scaleup.yml
+ vars:
+ g_etcd_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-etcd' }}"
+ g_lb_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-lb' }}"
+ g_masters_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-master' }}"
+ g_new_nodes_group: 'nodes_to_add'
+ g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
+ g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}"
+ g_nodeonmaster: true
+ openshift_cluster_id: "{{ cluster_id }}"
+ openshift_debug_level: 2
+ openshift_deployment_type: "{{ deployment_type }}"
+ openshift_hostname: "{{ ec2_private_ip_address }}"
+ openshift_public_hostname: "{{ ec2_ip_address }}"
diff --git a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
index 9c699120b..99f0577fc 100644
--- a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
@@ -20,10 +20,6 @@
| default(deployment_vars[deployment_type].image, true) }}"
when: ec2_image is not defined and not ec2_image_name
- set_fact:
- ec2_instance_type: "{{ lookup('env', 'ec2_instance_type')
- | default(deployment_vars[deployment_type].type, true) }}"
- when: ec2_instance_type is not defined
-- set_fact:
ec2_keypair: "{{ lookup('env', 'ec2_keypair')
| default(deployment_vars[deployment_type].keypair, true) }}"
when: ec2_keypair is not defined
@@ -37,25 +33,25 @@
when: ec2_assign_public_ip is not defined
- set_fact:
- ec2_instance_type: "{{ ec2_master_instance_type | default(deployment_vars[deployment_type].type, true) }}"
+ ec2_instance_type: "{{ ec2_master_instance_type | default(lookup('env', 'ec2_master_instance_type') | default(lookup('env', 'ec2_instance_type') | default(deployment_vars[deployment_type].type, true), true), true) }}"
ec2_security_groups: "{{ ec2_master_security_groups
| default(deployment_vars[deployment_type].security_groups, true) }}"
when: host_type == "master" and sub_host_type == "default"
- set_fact:
- ec2_instance_type: "{{ ec2_etcd_instance_type | default(deployment_vars[deployment_type].type, true) }}"
+ ec2_instance_type: "{{ ec2_etcd_instance_type | default(lookup('env', 'ec2_etcd_instance_type') | default(lookup('env', 'ec2_instance_type') | default(deployment_vars[deployment_type].type, true), true), true) }}"
ec2_security_groups: "{{ ec2_etcd_security_groups
| default(deployment_vars[deployment_type].security_groups, true)}}"
when: host_type == "etcd" and sub_host_type == "default"
- set_fact:
- ec2_instance_type: "{{ ec2_infra_instance_type | default(deployment_vars[deployment_type].type, true) }}"
+ ec2_instance_type: "{{ ec2_infra_instance_type | default(lookup('env', 'ec2_infra_instance_type') | default(lookup('env', 'ec2_instance_type') | default(deployment_vars[deployment_type].type, true), true), true) }}"
ec2_security_groups: "{{ ec2_infra_security_groups
| default(deployment_vars[deployment_type].security_groups, true) }}"
when: host_type == "node" and sub_host_type == "infra"
- set_fact:
- ec2_instance_type: "{{ ec2_node_instance_type | default(deployment_vars[deployment_type].type, true) }}"
+ ec2_instance_type: "{{ ec2_node_instance_type | default(lookup('env', 'ec2_node_instance_type') | default(lookup('env', 'ec2_instance_type') | default(deployment_vars[deployment_type].type, true), true), true) }}"
ec2_security_groups: "{{ ec2_node_security_groups
| default(deployment_vars[deployment_type].security_groups, true) }}"
when: host_type == "node" and sub_host_type == "compute"
@@ -81,7 +77,6 @@
- set_fact:
latest_ami: "{{ ami_result.results | oo_ami_selector(ec2_image_name) }}"
- user_data: "{{ lookup('template', '../templates/user_data.j2') }}"
volume_defs:
etcd:
root:
@@ -97,6 +92,10 @@
volume_size: "{{ lookup('env', 'os_master_root_vol_size') | default(25, true) }}"
device_type: "{{ lookup('env', 'os_master_root_vol_type') | default('gp2', true) }}"
iops: "{{ lookup('env', 'os_master_root_vol_iops') | default(500, true) }}"
+ docker:
+ volume_size: "{{ lookup('env', 'os_docker_vol_size') | default(10, true) }}"
+ device_type: "{{ lookup('env', 'os_docker_vol_type') | default('gp2', true) }}"
+ iops: "{{ lookup('env', 'os_docker_vol_iops') | default(500, true) }}"
node:
root:
volume_size: "{{ lookup('env', 'os_node_root_vol_size') | default(85, true) }}"
@@ -121,7 +120,7 @@
count: "{{ instances | length }}"
vpc_subnet_id: "{{ ec2_vpc_subnet | default(omit, true) }}"
assign_public_ip: "{{ ec2_assign_public_ip | default(omit, true) }}"
- user_data: "{{ user_data }}"
+ user_data: "{{ lookup('template', '../templates/user_data.j2') }}"
wait: yes
instance_tags:
created-by: "{{ created_by }}"
@@ -191,6 +190,22 @@
- instances
- ec2.instances
+- name: Add new instances to nodes_to_add group if needed
+ add_host:
+ hostname: "{{ item.0 }}"
+ ansible_ssh_host: "{{ item.1.dns_name }}"
+ ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
+ ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
+ groups: nodes_to_add
+ ec2_private_ip_address: "{{ item.1.private_ip }}"
+ ec2_ip_address: "{{ item.1.public_ip }}"
+ openshift_node_labels: "{{ node_label }}"
+ logrotate_scripts: "{{ logrotate }}"
+ with_together:
+ - instances
+ - ec2.instances
+ when: oo_extend_env is defined and oo_extend_env | bool
+
- name: Wait for ssh
wait_for: "port=22 host={{ item.dns_name }}"
with_items: ec2.instances
diff --git a/playbooks/aws/openshift-cluster/templates/user_data.j2 b/playbooks/aws/openshift-cluster/templates/user_data.j2
index 82c2f4d57..3621a7d7d 100644
--- a/playbooks/aws/openshift-cluster/templates/user_data.j2
+++ b/playbooks/aws/openshift-cluster/templates/user_data.j2
@@ -1,5 +1,5 @@
#cloud-config
-{% if type =='etcd' %}
+{% if type == 'etcd' and 'etcd' in volume_defs[type] %}
cloud_config_modules:
- disk_setup
- mounts
@@ -19,7 +19,7 @@ fs_setup:
partition: auto
{% endif %}
-{% if type == 'node' %}
+{% if type in ['node', 'master'] and 'docker' in volume_defs[type] %}
mounts:
- [ xvdb ]
- [ ephemeral0 ]
@@ -43,3 +43,10 @@ growpart:
runcmd:
- xfs_growfs /var
{% endif %}
+
+{% if deployment_vars[deployment_type].sudo %}
+- path: /etc/sudoers.d/99-{{ deployment_vars[deployment_type].ssh_user }}-cloud-init-requiretty
+ permissions: 440
+ content: |
+ Defaults:{{ deployment_vars[deployment_type].ssh_user }} !requiretty
+{% endif %}
diff --git a/playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml b/playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
new file mode 100644
index 000000000..8cad51b5e
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
@@ -0,0 +1,33 @@
+---
+# This playbook upgrades an existing AWS cluster, leaving nodes untouched if used with an 'online' deployment type.
+# Usage:
+# ansible-playbook playbooks/aws/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml -e deployment_type=online -e cluster_id=<cluster_id>
+- hosts: localhost
+ gather_facts: no
+ vars_files:
+ - ../../vars.yml
+ - "../../vars.{{ deployment_type }}.{{ cluster_id }}.yml"
+
+ tasks:
+ - set_fact:
+ g_ssh_user_tmp: "{{ deployment_vars[deployment_type].ssh_user }}"
+ g_sudo_tmp: "{{ deployment_vars[deployment_type].sudo }}"
+
+ - set_fact:
+ tmp_nodes_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-node' }}"
+ when: deployment_type != 'online'
+
+- include: ../../../../common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
+ vars:
+ g_etcd_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-etcd' }}"
+ g_lb_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-lb' }}"
+ g_masters_group: "{{ 'tag_env-host-type_' ~ cluster_id ~ '-openshift-master' }}"
+ g_nodes_group: "{{ tmp_nodes_group | default('') }}"
+ g_ssh_user: "{{ hostvars.localhost.g_ssh_user_tmp }}"
+ g_sudo: "{{ hostvars.localhost.g_sudo_tmp }}"
+ g_nodeonmaster: true
+ openshift_cluster_id: "{{ cluster_id }}"
+ openshift_debug_level: 2
+ openshift_deployment_type: "{{ deployment_type }}"
+ openshift_hostname: "{{ ec2_private_ip_address }}"
+ openshift_public_hostname: "{{ ec2_ip_address }}"
diff --git a/playbooks/byo/openshift-cluster/scaleup.yml b/playbooks/byo/openshift-cluster/scaleup.yml
new file mode 100644
index 000000000..70644d427
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/scaleup.yml
@@ -0,0 +1,10 @@
+---
+- include: ../../common/openshift-cluster/scaleup.yml
+ vars:
+ g_etcd_group: "{{ 'etcd' }}"
+ g_masters_group: "{{ 'masters' }}"
+ g_new_nodes_group: "{{ 'new_nodes' }}"
+ g_lb_group: "{{ 'lb' }}"
+ openshift_cluster_id: "{{ cluster_id | default('default') }}"
+ openshift_debug_level: 2
+ openshift_deployment_type: "{{ deployment_type }}"
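
Because g_new_nodes_group here is 'new_nodes', this BYO scaleup expects the hosts being added to live in a [new_nodes] inventory group alongside the existing etcd, masters and lb groups. A hypothetical run could be documented as follows (the inventory path is a placeholder; deployment_type is normally supplied by the inventory or with -e):

    # Usage (sketch):
    #   1. add the new hosts to a [new_nodes] group in the BYO inventory
    #   2. ansible-playbook -i <inventory> playbooks/byo/openshift-cluster/scaleup.yml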
diff --git a/playbooks/byo/openshift_facts.yml b/playbooks/byo/openshift_facts.yml
index 6d7c12fd4..babdfb952 100644
--- a/playbooks/byo/openshift_facts.yml
+++ b/playbooks/byo/openshift_facts.yml
@@ -1,7 +1,6 @@
---
- name: Gather Cluster facts
- hosts: all
- gather_facts: no
+ hosts: OSEv3
roles:
- openshift_facts
tasks:
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index a8bd634d3..482fa8441 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -6,6 +6,3 @@
- include: ../openshift-master/config.yml
- include: ../openshift-node/config.yml
- vars:
- osn_cluster_dns_domain: "{{ hostvars[groups.oo_first_master.0].openshift.dns.domain }}"
- osn_cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].cluster_dns_ip }}"
diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml
index 2bb69614f..34da372a4 100644
--- a/playbooks/common/openshift-cluster/evaluate_groups.yml
+++ b/playbooks/common/openshift-cluster/evaluate_groups.yml
@@ -12,8 +12,8 @@
when: g_masters_group is not defined
- fail:
- msg: This playbook requires g_nodes_group to be set
- when: g_nodes_group is not defined
+ msg: This playbook requires g_nodes_group or g_new_nodes_group to be set
+ when: g_nodes_group is not defined and g_new_nodes_group is not defined
- fail:
msg: This playbook requires g_lb_group to be set
@@ -35,14 +35,19 @@
ansible_sudo: "{{ g_sudo | default(omit) }}"
with_items: groups[g_masters_group] | default([])
+ # Use g_new_nodes_group if it exists otherwise g_nodes_group
+ - set_fact:
+ g_nodes_to_config: "{{ g_new_nodes_group | default(g_nodes_group | default([])) }}"
+
- name: Evaluate oo_nodes_to_config
add_host:
name: "{{ item }}"
groups: oo_nodes_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_sudo: "{{ g_sudo | default(omit) }}"
- with_items: groups[g_nodes_group] | default([])
+ with_items: groups[g_nodes_to_config] | default([])
+  # Skip adding the master to oo_nodes_to_config when g_new_nodes_group is defined
- name: Evaluate oo_nodes_to_config
add_host:
name: "{{ item }}"
@@ -50,7 +55,7 @@
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_sudo: "{{ g_sudo | default(omit) }}"
with_items: groups[g_masters_group] | default([])
- when: g_nodeonmaster is defined and g_nodeonmaster == true
+ when: g_nodeonmaster | default(false) == true and g_new_nodes_group is not defined
- name: Evaluate oo_first_etcd
add_host:
diff --git a/playbooks/common/openshift-cluster/scaleup.yml b/playbooks/common/openshift-cluster/scaleup.yml
index 6d2777732..e1778e41e 100644
--- a/playbooks/common/openshift-cluster/scaleup.yml
+++ b/playbooks/common/openshift-cluster/scaleup.yml
@@ -1,13 +1,5 @@
---
- include: evaluate_groups.yml
- vars:
- g_etcd_group: "{{ 'etcd' }}"
- g_masters_group: "{{ 'masters' }}"
- g_nodes_group: "{{ 'nodes' }}"
- g_lb_group: "{{ 'lb' }}"
- openshift_cluster_id: "{{ cluster_id | default('default') }}"
- openshift_debug_level: 2
- openshift_deployment_type: "{{ deployment_type }}"
- include: ../openshift-node/config.yml
vars:
diff --git a/playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check b/playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check
index ed4ab6d1b..b5459f312 100644
--- a/playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check
+++ b/playbooks/common/openshift-cluster/upgrades/files/pre-upgrade-check
@@ -83,7 +83,7 @@ def get(obj, *paths):
# pylint: disable=too-many-arguments
-def pretty_print_errors(namespace, kind, item_name, container_name, port_name, valid):
+def pretty_print_errors(namespace, kind, item_name, container_name, invalid_label, port_name, valid):
"""
Prints out results in human friendly way.
@@ -93,15 +93,16 @@ def pretty_print_errors(namespace, kind, item_name, container_name, port_name, v
- `item_name`: Name of the resource
- `container_name`: Name of the container. May be "" when kind=Service.
- `port_name`: Name of the port
+ - `invalid_label`: The label of the invalid port. Port.name/targetPort
- `valid`: True if the port is valid
"""
if not valid:
if len(container_name) > 0:
- print('%s/%s -n %s (Container="%s" Port="%s")' % (
- kind, item_name, namespace, container_name, port_name))
+ print('%s/%s -n %s (Container="%s" %s="%s")' % (
+ kind, item_name, namespace, container_name, invalid_label, port_name))
else:
- print('%s/%s -n %s (Port="%s")' % (
- kind, item_name, namespace, port_name))
+ print('%s/%s -n %s (%s="%s")' % (
+ kind, item_name, namespace, invalid_label, port_name))
def print_validation_header():
@@ -160,7 +161,7 @@ def main():
print_validation_header()
pretty_print_errors(
namespace, kind, item_name,
- container_name, port_name, valid)
+ container_name, "Port.name", port_name, valid)
# Services follow a different flow
for item in list_items('services'):
@@ -176,7 +177,8 @@ def main():
first_error = False
print_validation_header()
pretty_print_errors(
- namespace, "services", item_name, "", port_name, valid)
+ namespace, "services", item_name, "",
+ "targetPort", port_name, valid)
# If we had at least 1 error then exit with 1
if not first_error:
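
With the invalid_label argument threaded through above, the report now names the exact field that failed validation. For hypothetical resources the two code paths would print lines roughly like the following (resource, container and port names are placeholders):

    deploymentconfigs/router -n default (Container="router" Port.name="80-tcp")
    services/docker-registry -n default (targetPort="5000-tcp")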
diff --git a/playbooks/common/openshift-cluster/upgrades/files/versions.sh b/playbooks/common/openshift-cluster/upgrades/files/versions.sh
index f90719cab..c7c966b60 100644
--- a/playbooks/common/openshift-cluster/upgrades/files/versions.sh
+++ b/playbooks/common/openshift-cluster/upgrades/files/versions.sh
@@ -2,9 +2,9 @@
yum_installed=$(yum list installed "$@" 2>&1 | tail -n +2 | grep -v 'Installed Packages' | grep -v 'Red Hat Subscription Management' | grep -v 'Error:' | awk '{ print $2 }' | tr '\n' ' ')
-yum_available=$(yum list available "$@" 2>&1 | tail -n +2 | grep -v 'Available Packages' | grep -v 'Red Hat Subscription Management' | grep -v 'el7ose' | grep -v 'Error:' | awk '{ print $2 }' | tr '\n' ' ')
+yum_available=$(yum list available -q "$@" 2>&1 | tail -n +2 | grep -v 'Available Packages' | grep -v 'Red Hat Subscription Management' | grep -v 'el7ose' | grep -v 'Error:' | awk '{ print $2 }' | tr '\n' ' ')
echo "---"
-echo "curr_version: ${yum_installed}"
+echo "curr_version: ${yum_installed}"
echo "avail_version: ${yum_available}"
diff --git a/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py b/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py
index a6721bb92..9a065fd1c 100755
--- a/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py
+++ b/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py
@@ -78,6 +78,10 @@ def upgrade_master_3_0_to_3_1(ansible_module, config_base, backup):
config['kubernetesMasterConfig'].pop('apiLevels')
changes.append('master-config.yaml: removed kubernetesMasterConfig.apiLevels')
+ # Add masterCA to serviceAccountConfig
+ if 'serviceAccountConfig' in config and 'masterCA' not in config['serviceAccountConfig']:
+ config['serviceAccountConfig']['masterCA'] = config['oauthConfig'].get('masterCA', 'ca.crt')
+
# Add proxyClientInfo to master-config
if 'proxyClientInfo' not in config['kubernetesMasterConfig']:
config['kubernetesMasterConfig']['proxyClientInfo'] = {
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
index 78797f8b8..0309e8a77 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_0_to_v3_1/upgrade.yml
@@ -10,7 +10,7 @@
roles:
- openshift_facts
-- name: Evaluate etcd_hosts_to_backup
+- name: Evaluate additional groups for upgrade
hosts: localhost
tasks:
- name: Evaluate etcd_hosts_to_backup
@@ -36,9 +36,9 @@
- fail:
msg: >
- This upgrade is only supported for origin and openshift-enterprise
+ This upgrade is only supported for origin, openshift-enterprise, and online
deployment types
- when: deployment_type not in ['origin','openshift-enterprise']
+ when: deployment_type not in ['origin','openshift-enterprise', 'online']
- fail:
msg: >
@@ -52,7 +52,7 @@
- name: Verify upgrade can proceed
- hosts: masters:nodes
+ hosts: oo_masters_to_config:oo_nodes_to_config
tasks:
- name: Clean yum cache
command: yum clean all
@@ -78,6 +78,29 @@
msg: Atomic OpenShift 3.1 packages not found
when: g_aos_versions.curr_version | version_compare('3.0.2.900','<') and (g_aos_versions.avail_version is none or g_aos_versions.avail_version | version_compare('3.0.2.900','<'))
+ - set_fact:
+ pre_upgrade_complete: True
+
+
+##############################################################################
+# Gate on pre-upgrade checks
+##############################################################################
+- name: Gate on pre-upgrade checks
+ hosts: localhost
+ vars:
+ pre_upgrade_hosts: "{{ groups.oo_masters_to_config | union(groups.oo_nodes_to_config) }}"
+ tasks:
+ - set_fact:
+ pre_upgrade_completed: "{{ hostvars
+ | oo_select_keys(pre_upgrade_hosts)
+ | oo_collect('inventory_hostname', {'pre_upgrade_complete': true}) }}"
+ - set_fact:
+ pre_upgrade_failed: "{{ pre_upgrade_hosts | difference(pre_upgrade_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following hosts did not complete pre-upgrade checks: {{ pre_upgrade_failed | join(',') }}"
+ when: pre_upgrade_failed | length > 0
+
+
###############################################################################
# Backup etcd
@@ -90,6 +113,7 @@
roles:
- openshift_facts
tasks:
+ # Ensure we persist the etcd role for this host in openshift_facts
- openshift_facts:
role: etcd
local_facts: {}
@@ -134,11 +158,32 @@
etcdctl backup --data-dir={{ openshift.etcd.etcd_data_dir }}
--backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}
+ - set_fact:
+ etcd_backup_complete: True
+
- name: Display location of etcd backup
debug:
msg: "Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}"
+##############################################################################
+# Gate on etcd backup
+##############################################################################
+- name: Gate on etcd backup
+ hosts: localhost
+ tasks:
+ - set_fact:
+ etcd_backup_completed: "{{ hostvars
+ | oo_select_keys(groups.etcd_hosts_to_backup)
+ | oo_collect('inventory_hostname', {'etcd_backup_complete': true}) }}"
+ - set_fact:
+ etcd_backup_failed: "{{ groups.etcd_hosts_to_backup | difference(etcd_backup_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}"
+ when: etcd_backup_failed | length > 0
+
+
+
###############################################################################
# Upgrade Masters
###############################################################################
@@ -152,7 +197,7 @@
changed_when: False
- name: Update deployment type
- hosts: OSEv3
+ hosts: oo_masters_to_config:oo_nodes_to_config:oo_etcd_to_config
roles:
- openshift_facts
post_tasks:
@@ -161,6 +206,16 @@
local_facts:
deployment_type: "{{ deployment_type }}"
+- name: Update master facts
+ hosts: oo_masters_to_config
+ roles:
+ - openshift_facts
+ post_tasks:
+ - openshift_facts:
+ role: master
+ local_facts:
+ cluster_method: "{{ openshift_master_cluster_method | default(None) }}"
+
- name: Upgrade master packages and configuration
hosts: oo_masters_to_config
vars:
@@ -290,6 +345,30 @@
changed_when: False
+- name: Set master update status to complete
+ hosts: oo_masters_to_config
+ tasks:
+ - set_fact:
+ master_update_complete: True
+
+
+##############################################################################
+# Gate on master update complete
+##############################################################################
+- name: Gate on master update
+ hosts: localhost
+ tasks:
+ - set_fact:
+ master_update_completed: "{{ hostvars
+ | oo_select_keys(groups.oo_masters_to_config)
+ | oo_collect('inventory_hostname', {'master_update_complete': true}) }}"
+ - set_fact:
+ master_update_failed: "{{ groups.oo_masters_to_config | difference(master_update_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following masters did not finish updating: {{ master_update_failed | join(',') }}"
+ when: master_update_failed | length > 0
+
+
###############################################################################
# Upgrade Nodes
###############################################################################
@@ -309,6 +388,26 @@
- name: Ensure node service enabled
service: name="{{ openshift.common.service_type }}-node" state=started enabled=yes
+ - set_fact:
+ node_update_complete: True
+
+
+##############################################################################
+# Gate on nodes update
+##############################################################################
+- name: Gate on nodes update
+ hosts: localhost
+ tasks:
+ - set_fact:
+ node_update_completed: "{{ hostvars
+ | oo_select_keys(groups.oo_nodes_to_config)
+ | oo_collect('inventory_hostname', {'node_update_complete': true}) }}"
+ - set_fact:
+ node_update_failed: "{{ groups.oo_nodes_to_config | difference(node_update_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following nodes did not finish updating: {{ node_update_failed | join(',') }}"
+ when: node_update_failed | length > 0
+
###############################################################################
# Post upgrade - Reconcile Cluster Roles and Cluster Role Bindings
@@ -356,6 +455,28 @@
when: openshift_master_ha | bool
run_once: true
+ - set_fact:
+ reconcile_complete: True
+
+
+##############################################################################
+# Gate on reconcile
+##############################################################################
+- name: Gate on reconcile
+ hosts: localhost
+ tasks:
+ - set_fact:
+ reconcile_completed: "{{ hostvars
+ | oo_select_keys(groups.oo_masters_to_config)
+ | oo_collect('inventory_hostname', {'reconcile_complete': true}) }}"
+ - set_fact:
+ reconcile_failed: "{{ groups.oo_masters_to_config | difference(reconcile_completed) }}"
+ - fail:
+ msg: "Upgrade cannot continue. The following masters did not finish reconciling: {{ reconcile_failed | join(',') }}"
+ when: reconcile_failed | length > 0
+
+
+
###############################################################################
# Post upgrade - Upgrade default router, default registry and examples
@@ -396,24 +517,28 @@
- _default_router.rc == 0
- "'false' in _scc.stdout"
command: >
- {{ oc_cmd }} patch scc/privileged -p '{"allowHostPorts":true,"allowHostNetwork":true}' --loglevel=9
+ {{ oc_cmd }} patch scc/privileged -p
+ '{"allowHostPorts":true,"allowHostNetwork":true}' --api-version=v1
- name: Update deployment config to 1.0.4/3.0.1 spec
when: _default_router.rc == 0
command: >
{{ oc_cmd }} patch dc/router -p
'{"spec":{"strategy":{"rollingParams":{"updatePercent":-10},"spec":{"serviceAccount":"router","serviceAccountName":"router"}}}}'
+ --api-version=v1
- name: Switch to hostNetwork=true
when: _default_router.rc == 0
command: >
{{ oc_cmd }} patch dc/router -p '{"spec":{"template":{"spec":{"hostNetwork":true}}}}'
+ --api-version=v1
- name: Update router image to current version
when: _default_router.rc == 0
command: >
{{ oc_cmd }} patch dc/router -p
'{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}"}]}}}}'
+ --api-version=v1
- name: Check for default registry
command: >
@@ -427,3 +552,4 @@
command: >
{{ oc_cmd }} patch dc/docker-registry -p
'{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'
+ --api-version=v1
diff --git a/playbooks/common/openshift-etcd/config.yml b/playbooks/common/openshift-etcd/config.yml
index ed23ada88..7d94ced2e 100644
--- a/playbooks/common/openshift-etcd/config.yml
+++ b/playbooks/common/openshift-etcd/config.yml
@@ -24,7 +24,7 @@
- /etc/etcd/ca.crt
register: g_etcd_server_cert_stat_result
- set_fact:
- etcd_server_certs_missing: "{{ g_etcd_server_cert_stat_result.results | map(attribute='stat.exists')
+ etcd_server_certs_missing: "{{ g_etcd_server_cert_stat_result.results | oo_collect(attribute='stat.exists')
| list | intersect([false])}}"
etcd_cert_subdir: etcd-{{ openshift.common.hostname }}
etcd_cert_config_dir: /etc/etcd
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 1b3fba3aa..becd68dbe 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -51,9 +51,6 @@
console_url: "{{ openshift_master_console_url | default(None) }}"
console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}"
public_console_url: "{{ openshift_master_public_console_url | default(None) }}"
- - role: etcd
- local_facts: {}
- when: openshift.master.embedded_etcd | bool
- name: Check status of external etcd certificatees
stat:
path: "{{ openshift.common.config_base }}/master/{{ item }}"
@@ -63,7 +60,7 @@
register: g_external_etcd_cert_stat_result
- set_fact:
etcd_client_certs_missing: "{{ g_external_etcd_cert_stat_result.results
- | map(attribute='stat.exists')
+ | oo_collect(attribute='stat.exists')
| list | intersect([false])}}"
etcd_cert_subdir: openshift-master-{{ openshift.common.hostname }}
etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
@@ -160,7 +157,7 @@
register: g_master_cert_stat_result
- set_fact:
master_certs_missing: "{{ False in (g_master_cert_stat_result.results
- | map(attribute='stat.exists')
+ | oo_collect(attribute='stat.exists')
| list ) }}"
master_cert_subdir: master-{{ openshift.common.hostname }}
master_cert_config_dir: "{{ openshift.common.config_base }}/master"
@@ -207,14 +204,6 @@
validate_checksum: yes
with_items: masters_needing_certs
-- name: Inspect named certificates
- hosts: oo_first_master
- tasks:
- - name: Collect certificate names
- set_fact:
- parsed_named_certificates: "{{ openshift_master_named_certificates | oo_parse_certificate_names(master_cert_config_dir, openshift.common.internal_hostnames) }}"
- when: openshift_master_named_certificates is defined
-
- name: Compute haproxy_backend_servers
hosts: localhost
connection: local
@@ -255,6 +244,8 @@
- fail:
msg: "openshift_master_session_auth_secrets and openshift_master_encryption_secrets must be equal length"
when: (openshift_master_session_auth_secrets is defined and openshift_master_session_encryption_secrets is defined) and (openshift_master_session_auth_secrets | length != openshift_master_session_encryption_secrets | length)
+ - name: Install OpenSSL package
+ action: "{{ansible_pkg_mgr}} pkg=openssl state=present"
- name: Generate session authentication key
command: /usr/bin/openssl rand -base64 24
register: session_auth_output
@@ -268,18 +259,62 @@
- set_fact:
session_auth_secret: "{{ openshift_master_session_auth_secrets
| default(session_auth_output.results
- | map(attribute='stdout')
+ | oo_collect(attribute='stdout')
| list) }}"
session_encryption_secret: "{{ openshift_master_session_encryption_secrets
| default(session_encryption_output.results
- | map(attribute='stdout')
+ | oo_collect(attribute='stdout')
| list) }}"
+- name: Parse named certificates
+ hosts: localhost
+ vars:
+ internal_hostnames: "{{ hostvars[groups.oo_first_master.0].openshift.common.internal_hostnames }}"
+ named_certificates: "{{ hostvars[groups.oo_first_master.0].openshift_master_named_certificates | default([]) }}"
+ named_certificates_dir: "{{ hostvars[groups.oo_first_master.0].master_cert_config_dir }}/named_certificates/"
+ tasks:
+ - set_fact:
+ parsed_named_certificates: "{{ named_certificates | oo_parse_named_certificates(named_certificates_dir, internal_hostnames) }}"
+ when: named_certificates | length > 0
+
+- name: Deploy named certificates
+ hosts: oo_masters_to_config
+ vars:
+ named_certs_dir: "{{ master_cert_config_dir }}/named_certificates/"
+ named_certs_specified: "{{ openshift_master_named_certificates is defined }}"
+ overwrite_named_certs: "{{ openshift_master_overwrite_named_certificates | default(false) }}"
+ roles:
+ - role: openshift_facts
+ post_tasks:
+ - openshift_facts:
+ role: master
+ local_facts:
+ named_certificates: "{{ hostvars.localhost.parsed_named_certificates | default([]) }}"
+ additive_facts_to_overwrite:
+ - "{{ 'master.named_certificates' if overwrite_named_certs | bool else omit }}"
+ - name: Clear named certificates
+ file:
+ path: "{{ named_certs_dir }}"
+ state: absent
+ when: overwrite_named_certs | bool
+ - name: Ensure named certificate directory exists
+ file:
+ path: "{{ named_certs_dir }}"
+ state: directory
+ when: named_certs_specified | bool
+ - name: Land named certificates
+ copy: src="{{ item.certfile }}" dest="{{ named_certs_dir }}"
+ with_items: openshift_master_named_certificates
+ when: named_certs_specified | bool
+ - name: Land named certificate keys
+ copy: src="{{ item.keyfile }}" dest="{{ named_certs_dir }}"
+ with_items: openshift_master_named_certificates
+ when: named_certs_specified | bool
+
- name: Configure master instances
hosts: oo_masters_to_config
serial: 1
vars:
- named_certificates: "{{ hostvars[groups['oo_first_master'][0]]['parsed_named_certificates'] | default([])}}"
sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
openshift_master_count: "{{ groups.oo_masters_to_config | length }}"
@@ -317,20 +352,8 @@
- openshift_examples
- role: openshift_cluster_metrics
when: openshift.common.use_cluster_metrics | bool
-
-- name: Determine cluster dns ip
- hosts: oo_first_master
- tasks:
- - name: Get master service ip
- command: "{{ openshift.common.client_binary }} get -o template svc kubernetes --template=\\{\\{.spec.clusterIP\\}\\}"
- register: master_service_ip_output
- when: openshift.common.version_greater_than_3_1_or_1_1 | bool
- - set_fact:
- cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].openshift.dns.ip }}"
- when: not openshift.common.version_greater_than_3_1_or_1_1 | bool
- - set_fact:
- cluster_dns_ip: "{{ master_service_ip_output.stdout }}"
- when: openshift.common.version_greater_than_3_1_or_1_1 | bool
+ - role: openshift_manageiq
+ when: openshift.common.use_manageiq | bool
- name: Enable cockpit
hosts: oo_first_master
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
index 8da9e231f..2b6171cb3 100644
--- a/playbooks/common/openshift-node/config.yml
+++ b/playbooks/common/openshift-node/config.yml
@@ -33,7 +33,7 @@
- server.crt
register: stat_result
- set_fact:
- certs_missing: "{{ stat_result.results | map(attribute='stat.exists')
+ certs_missing: "{{ stat_result.results | oo_collect(attribute='stat.exists')
| list | intersect([false])}}"
node_subdir: node-{{ openshift.common.hostname }}
config_dir: "{{ openshift.common.config_base }}/generated-configs/node-{{ openshift.common.hostname }}"
@@ -48,7 +48,7 @@
when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config and (openshift.common.use_flannel | bool)
- set_fact:
etcd_client_flannel_certs_missing: "{{ g_external_etcd_flannel_cert_stat_result.results
- | map(attribute='stat.exists')
+ | oo_collect(attribute='stat.exists')
| list | intersect([false])}}"
etcd_cert_subdir: openshift-node-{{ openshift.common.hostname }}
etcd_cert_config_dir: "{{ openshift.common.config_base }}/node"
diff --git a/playbooks/gce/openshift-cluster/launch.yml b/playbooks/gce/openshift-cluster/launch.yml
index 8be5d53e7..d6ef57c45 100644
--- a/playbooks/gce/openshift-cluster/launch.yml
+++ b/playbooks/gce/openshift-cluster/launch.yml
@@ -16,6 +16,8 @@
cluster: "{{ cluster_id }}"
type: "{{ k8s_type }}"
g_sub_host_type: "default"
+ gce_machine_type: "{{ lookup('env', 'gce_machine_master_type') | default(lookup('env', 'gce_machine_type'), true) }}"
+ gce_machine_image: "{{ lookup('env', 'gce_machine_master_image') | default(lookup('env', 'gce_machine_image'), true) }}"
- include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
vars:
@@ -27,6 +29,8 @@
cluster: "{{ cluster_id }}"
type: "{{ k8s_type }}"
g_sub_host_type: "{{ sub_host_type }}"
+ gce_machine_type: "{{ lookup('env', 'gce_machine_node_type') | default(lookup('env', 'gce_machine_type'), true) }}"
+ gce_machine_image: "{{ lookup('env', 'gce_machine_node_image') | default(lookup('env', 'gce_machine_image'), true) }}"
- include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml
vars:
diff --git a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
index c428cb465..de8a75b18 100644
--- a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml
@@ -5,8 +5,8 @@
- name: Launch instance(s)
gce:
instance_names: "{{ instances }}"
- machine_type: "{{ lookup('env', 'gce_machine_type') | default('n1-standard-1', true) }}"
- image: "{{ lookup('env', 'gce_machine_image') | default(deployment_vars[deployment_type].image, true) }}"
+ machine_type: "{{ gce_machine_type | default(deployment_vars[deployment_type].machine_type, true) }}"
+ image: "{{ gce_machine_image | default(deployment_vars[deployment_type].image, true) }}"
service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}"
pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"
project_id: "{{ lookup('env', 'gce_project_id') }}"
diff --git a/playbooks/gce/openshift-cluster/vars.yml b/playbooks/gce/openshift-cluster/vars.yml
index 6de007807..a8ce8eb22 100644
--- a/playbooks/gce/openshift-cluster/vars.yml
+++ b/playbooks/gce/openshift-cluster/vars.yml
@@ -5,13 +5,16 @@ sdn_network_plugin: redhat/openshift-ovs-subnet
deployment_vars:
origin:
image: preinstalled-slave-50g-v5
+ machine_type: n1-standard-1
ssh_user: root
sudo: yes
online:
image: libra-rhel7
+ machine_type: n1-standard-1
ssh_user: root
sudo: no
enterprise:
image: rhel-7
+ machine_type: n1-standard-1
ssh_user:
sudo: yes
diff --git a/playbooks/libvirt/openshift-cluster/list.yml b/playbooks/libvirt/openshift-cluster/list.yml
index eaedc4d0d..5954bb01e 100644
--- a/playbooks/libvirt/openshift-cluster/list.yml
+++ b/playbooks/libvirt/openshift-cluster/list.yml
@@ -18,6 +18,12 @@
- name: List Hosts
hosts: oo_list_hosts
+
+- name: List Hosts
+ hosts: localhost
+ gather_facts: no
+ vars_files:
+ - vars.yml
tasks:
- debug:
- msg: 'public:{{ansible_default_ipv4.address}} private:{{ansible_default_ipv4.address}}'
+ msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster }}"
diff --git a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
index 4b91c6da8..4825207c9 100644
--- a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
@@ -81,7 +81,7 @@
ansible_ssh_host: '{{ item.1 }}'
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_sudo: "{{ deployment_vars[deployment_type].sudo }}"
- groups: 'tag_env-{{ cluster }}, tag_host-type-{{ type }}, tag_env-host-type-{{ cluster }}-openshift-{{ type }}'
+ groups: 'tag_env-{{ cluster }}, tag_host-type-{{ type }}, tag_env-host-type-{{ cluster }}-openshift-{{ type }}, tag_sub-host-type-{{ g_sub_host_type }}'
with_together:
- instances
- ips
diff --git a/playbooks/libvirt/openshift-cluster/templates/domain.xml b/playbooks/libvirt/openshift-cluster/templates/domain.xml
index df200e374..870bcf2a6 100644
--- a/playbooks/libvirt/openshift-cluster/templates/domain.xml
+++ b/playbooks/libvirt/openshift-cluster/templates/domain.xml
@@ -6,6 +6,7 @@
<ansible:tag>env-{{ cluster }}</ansible:tag>
<ansible:tag>env-host-type-{{ cluster }}-openshift-{{ type }}</ansible:tag>
<ansible:tag>host-type-{{ type }}</ansible:tag>
+ <ansible:tag>sub-host-type-{{ g_sub_host_type }}</ansible:tag>
</ansible:tags>
</metadata>
<currentMemory unit='GiB'>1</currentMemory>
diff --git a/playbooks/openstack/openshift-cluster/list.yml b/playbooks/openstack/openshift-cluster/list.yml
index a75e350c7..fa194b072 100644
--- a/playbooks/openstack/openshift-cluster/list.yml
+++ b/playbooks/openstack/openshift-cluster/list.yml
@@ -19,6 +19,12 @@
- name: List Hosts
hosts: oo_list_hosts
+
+- name: List Hosts
+ hosts: localhost
+ gather_facts: no
+ vars_files:
+ - vars.yml
tasks:
- debug:
- msg: 'public:{{ansible_ssh_host}} private:{{ansible_default_ipv4.address}}'
+ msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster }}"
diff --git a/rel-eng/packages/.readme b/rel-eng/packages/.readme
deleted file mode 100644
index 8999c8dbc..000000000
--- a/rel-eng/packages/.readme
+++ /dev/null
@@ -1,3 +0,0 @@
-the rel-eng/packages directory contains metadata files
-named after their packages. Each file has the latest tagged
-version and the project's relative directory.
diff --git a/rel-eng/packages/openshift-ansible-bin b/rel-eng/packages/openshift-ansible-bin
deleted file mode 100644
index 11c2906f0..000000000
--- a/rel-eng/packages/openshift-ansible-bin
+++ /dev/null
@@ -1 +0,0 @@
-0.0.19-1 bin/
diff --git a/rel-eng/packages/openshift-ansible-inventory b/rel-eng/packages/openshift-ansible-inventory
deleted file mode 100644
index 733c626cf..000000000
--- a/rel-eng/packages/openshift-ansible-inventory
+++ /dev/null
@@ -1 +0,0 @@
-0.0.9-1 inventory/
diff --git a/rel-eng/tito.props b/rel-eng/tito.props
deleted file mode 100644
index eab3f190d..000000000
--- a/rel-eng/tito.props
+++ /dev/null
@@ -1,5 +0,0 @@
-[buildconfig]
-builder = tito.builder.Builder
-tagger = tito.tagger.VersionTagger
-changelog_do_not_remove_cherrypick = 0
-changelog_format = %s (%ae)
diff --git a/roles/ansible/tasks/main.yml b/roles/ansible/tasks/main.yml
index 5d20a3b35..f79273824 100644
--- a/roles/ansible/tasks/main.yml
+++ b/roles/ansible/tasks/main.yml
@@ -5,6 +5,13 @@
yum:
pkg: ansible
state: installed
+ when: ansible_pkg_mgr == "yum"
+
+- name: Install Ansible
+ dnf:
+ pkg: ansible
+ state: installed
+ when: ansible_pkg_mgr == "dnf"
- include: config.yml
vars:
diff --git a/roles/cockpit/tasks/main.yml b/roles/cockpit/tasks/main.yml
index 875cbad21..8410e7c90 100644
--- a/roles/cockpit/tasks/main.yml
+++ b/roles/cockpit/tasks/main.yml
@@ -8,6 +8,18 @@
- cockpit-shell
- cockpit-bridge
- "{{ cockpit_plugins }}"
+ when: ansible_pkg_mgr == "yum"
+
+- name: Install cockpit-ws
+ dnf:
+ name: "{{ item }}"
+ state: present
+ with_items:
+ - cockpit-ws
+ - cockpit-shell
+ - cockpit-bridge
+ - "{{ cockpit_plugins }}"
+ when: ansible_pkg_mgr == "dnf"
- name: Enable cockpit-ws
service:
diff --git a/roles/copr_cli/README.md b/roles/copr_cli/README.md
new file mode 100644
index 000000000..edc68454e
--- /dev/null
+++ b/roles/copr_cli/README.md
@@ -0,0 +1,38 @@
+Role Name
+=========
+
+This role manages Copr CLI.
+
+https://apps.fedoraproject.org/packages/copr-cli/
+
+Requirements
+------------
+
+None
+
+Role Variables
+--------------
+
+None
+
+Dependencies
+------------
+
+None
+
+Example Playbook
+----------------
+
+ - hosts: servers
+ roles:
+ - role: copr_cli
+
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
+
+Thomas Wiest
diff --git a/roles/copr_cli/defaults/main.yml b/roles/copr_cli/defaults/main.yml
new file mode 100644
index 000000000..3b8adf910
--- /dev/null
+++ b/roles/copr_cli/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+# defaults file for copr_cli
diff --git a/roles/copr_cli/handlers/main.yml b/roles/copr_cli/handlers/main.yml
new file mode 100644
index 000000000..c3dec5a4c
--- /dev/null
+++ b/roles/copr_cli/handlers/main.yml
@@ -0,0 +1,2 @@
+---
+# handlers file for copr_cli
diff --git a/roles/copr_cli/meta/main.yml b/roles/copr_cli/meta/main.yml
new file mode 100644
index 000000000..f050281fd
--- /dev/null
+++ b/roles/copr_cli/meta/main.yml
@@ -0,0 +1,14 @@
+---
+galaxy_info:
+ author: Thomas Wiest
+ description: Manages Copr CLI
+ company: Red Hat
+ license: Apache License, Version 2.0
+ min_ansible_version: 1.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - packaging
+dependencies: []
diff --git a/roles/copr_cli/tasks/main.yml b/roles/copr_cli/tasks/main.yml
new file mode 100644
index 000000000..f8496199d
--- /dev/null
+++ b/roles/copr_cli/tasks/main.yml
@@ -0,0 +1,10 @@
+---
+- yum:
+ name: copr-cli
+ state: present
+ when: ansible_pkg_mgr == "yum"
+
+- dnf:
+ name: copr-cli
+ state: present
+ when: ansible_pkg_mgr == "dnf"
diff --git a/roles/copr_cli/vars/main.yml b/roles/copr_cli/vars/main.yml
new file mode 100644
index 000000000..1522c94d9
--- /dev/null
+++ b/roles/copr_cli/vars/main.yml
@@ -0,0 +1,2 @@
+---
+# vars file for copr_cli
diff --git a/roles/docker/README.md b/roles/docker/README.md
index 225dd44b9..46f259eb7 100644
--- a/roles/docker/README.md
+++ b/roles/docker/README.md
@@ -1,38 +1,38 @@
Role Name
=========
-A brief description of the role goes here.
+Ensures the docker package is installed, and optionally raises the timeout for systemd-udevd.service to 5 minutes.
Requirements
------------
-Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
+None
Role Variables
--------------
-A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
+udevw_udevd_dir: location of systemd config for systemd-udevd.service
+docker_udev_workaround: raises udevd timeout to 5 minutes (https://bugzilla.redhat.com/show_bug.cgi?id=1272446)
Dependencies
------------
-A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
+None
Example Playbook
----------------
-Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
-
- hosts: servers
roles:
- - { role: username.rolename, x: 42 }
+ - role: docker
+ docker_udev_workaround: "true"
License
-------
-BSD
+ASL 2.0
Author Information
------------------
-An optional section for the role authors to include contact information, or a website (HTML is not allowed).
+OpenShift operations, Red Hat, Inc
diff --git a/roles/docker/handlers/main.yml b/roles/docker/handlers/main.yml
index eca7419c1..7d60f1891 100644
--- a/roles/docker/handlers/main.yml
+++ b/roles/docker/handlers/main.yml
@@ -2,3 +2,8 @@
- name: restart docker
service: name=docker state=restarted
+
+- name: restart udev
+ service:
+ name: systemd-udevd
+ state: restarted
diff --git a/roles/docker/meta/main.yml b/roles/docker/meta/main.yml
index c5c362c60..6e2c98601 100644
--- a/roles/docker/meta/main.yml
+++ b/roles/docker/meta/main.yml
@@ -1,124 +1,12 @@
---
galaxy_info:
- author: your name
- description:
- company: your company (optional)
- # Some suggested licenses:
- # - BSD (default)
- # - MIT
- # - GPLv2
- # - GPLv3
- # - Apache
- # - CC-BY
- license: license (GPLv2, CC-BY, etc)
+ author: OpenShift
+ description: docker package install
+ company: Red Hat, Inc
+ license: ASL 2.0
min_ansible_version: 1.2
- #
- # Below are all platforms currently available. Just uncomment
- # the ones that apply to your role. If you don't see your
- # platform on this list, let us know and we'll get it added!
- #
- #platforms:
- #- name: EL
- # versions:
- # - all
- # - 5
- # - 6
- # - 7
- #- name: GenericUNIX
- # versions:
- # - all
- # - any
- #- name: Fedora
- # versions:
- # - all
- # - 16
- # - 17
- # - 18
- # - 19
- # - 20
- #- name: opensuse
- # versions:
- # - all
- # - 12.1
- # - 12.2
- # - 12.3
- # - 13.1
- # - 13.2
- #- name: Amazon
- # versions:
- # - all
- # - 2013.03
- # - 2013.09
- #- name: GenericBSD
- # versions:
- # - all
- # - any
- #- name: FreeBSD
- # versions:
- # - all
- # - 8.0
- # - 8.1
- # - 8.2
- # - 8.3
- # - 8.4
- # - 9.0
- # - 9.1
- # - 9.1
- # - 9.2
- #- name: Ubuntu
- # versions:
- # - all
- # - lucid
- # - maverick
- # - natty
- # - oneiric
- # - precise
- # - quantal
- # - raring
- # - saucy
- # - trusty
- #- name: SLES
- # versions:
- # - all
- # - 10SP3
- # - 10SP4
- # - 11
- # - 11SP1
- # - 11SP2
- # - 11SP3
- #- name: GenericLinux
- # versions:
- # - all
- # - any
- #- name: Debian
- # versions:
- # - all
- # - etch
- # - lenny
- # - squeeze
- # - wheezy
- #
- # Below are all categories currently available. Just as with
- # the platforms above, uncomment those that apply to your role.
- #
- #categories:
- #- cloud
- #- cloud:ec2
- #- cloud:gce
- #- cloud:rax
- #- clustering
- #- database
- #- database:nosql
- #- database:sql
- #- development
- #- monitoring
- #- networking
- #- packaging
- #- system
- #- web
+ platforms:
+ - name: EL
+ versions:
+ - 7
dependencies: []
- # List your role dependencies here, one per line. Only
- # dependencies available via galaxy should be listed here.
- # Be sure to remove the '[]' above if you add dependencies
- # to this list.
-
diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml
index 96949230d..857674454 100644
--- a/roles/docker/tasks/main.yml
+++ b/roles/docker/tasks/main.yml
@@ -2,7 +2,14 @@
# tasks file for docker
- name: Install docker
yum: pkg=docker
+ when: ansible_pkg_mgr == "yum"
+
+- name: Install docker
+ dnf: pkg=docker
+ when: ansible_pkg_mgr == "dnf"
- name: enable and start the docker service
service: name=docker enabled=yes state=started
+- include: udev_workaround.yml
+ when: docker_udev_workaround | default(False)
diff --git a/roles/docker/tasks/udev_workaround.yml b/roles/docker/tasks/udev_workaround.yml
new file mode 100644
index 000000000..3c236f698
--- /dev/null
+++ b/roles/docker/tasks/udev_workaround.yml
@@ -0,0 +1,30 @@
+---
+
+- name: Getting current systemd-udevd exec command
+ command: grep -e "^ExecStart=" /lib/systemd/system/systemd-udevd.service
+ changed_when: false
+ register: udevw_udev_start_cmd
+
+- name: Assure systemd-udevd.service.d directory exists
+ file:
+ path: "{{ udevw_udevd_dir }}"
+ state: directory
+
+- name: Create systemd-udevd override file
+ copy:
+ content: |
+ [Service]
+      # Need a blank ExecStart to "clear" the pre-existing one
+ ExecStart=
+ {{ udevw_udev_start_cmd.stdout }} --event-timeout=300
+ dest: "{{ udevw_udevd_dir }}/override.conf"
+ owner: root
+ mode: "0644"
+ notify:
+ - restart udev
+ register: udevw_override_conf
+
+- name: reload systemd config files
+ command: systemctl daemon-reload
+ when: udevw_override_conf | changed
+
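For reference, on a host whose stock systemd-udevd.service has ExecStart=/usr/lib/systemd/systemd-udevd (an assumption about the base unit), the tasks above would leave a drop-in roughly like this, at the udevw_udevd_dir default defined in the new vars file below:

    # /etc/systemd/system/systemd-udevd.service.d/override.conf
    [Service]
    ExecStart=
    ExecStart=/usr/lib/systemd/systemd-udevd --event-timeout=300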
diff --git a/roles/docker/vars/main.yml b/roles/docker/vars/main.yml
new file mode 100644
index 000000000..162487545
--- /dev/null
+++ b/roles/docker/vars/main.yml
@@ -0,0 +1,3 @@
+---
+
+udevw_udevd_dir: /etc/systemd/system/systemd-udevd.service.d
diff --git a/roles/etcd/README.md b/roles/etcd/README.md
index 88e4ff874..329a926c0 100644
--- a/roles/etcd/README.md
+++ b/roles/etcd/README.md
@@ -7,7 +7,7 @@ Requirements
------------
This role assumes it's being deployed on a RHEL/Fedora based host with package
-named 'etcd' available via yum.
+named 'etcd' available via yum or dnf (conditionally).
Role Variables
--------------
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index fcbdecd37..efaab5f31 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -9,6 +9,11 @@
- name: Install etcd
yum: pkg=etcd-2.* state=present
+ when: ansible_pkg_mgr == "yum"
+
+- name: Install etcd
+ dnf: pkg=etcd* state=present
+ when: ansible_pkg_mgr == "dnf"
- name: Validate permissions on the config dir
file:
diff --git a/roles/flannel/README.md b/roles/flannel/README.md
index b8aa830ac..8f271aada 100644
--- a/roles/flannel/README.md
+++ b/roles/flannel/README.md
@@ -7,7 +7,8 @@ Requirements
------------
This role assumes it's being deployed on a RHEL/Fedora based host with package
-named 'flannel' available via yum, in version superior to 0.3.
+named 'flannel' available via yum or dnf (conditionally), in version superior
+to 0.3.
Role Variables
--------------
diff --git a/roles/flannel/tasks/main.yml b/roles/flannel/tasks/main.yml
index acfb009ec..86e1bc96e 100644
--- a/roles/flannel/tasks/main.yml
+++ b/roles/flannel/tasks/main.yml
@@ -2,6 +2,12 @@
- name: Install flannel
sudo: true
yum: pkg=flannel state=present
+ when: ansible_pkg_mgr == "yum"
+
+- name: Install flannel
+ sudo: true
+ dnf: pkg=flannel state=present
+ when: ansible_pkg_mgr == "dnf"
- name: Set flannel etcd url
sudo: true
diff --git a/roles/fluentd_master/tasks/main.yml b/roles/fluentd_master/tasks/main.yml
index 55cd94460..43c499b4d 100644
--- a/roles/fluentd_master/tasks/main.yml
+++ b/roles/fluentd_master/tasks/main.yml
@@ -4,6 +4,13 @@
yum:
name: 'http://packages.treasuredata.com/2/redhat/7/x86_64/td-agent-2.2.0-0.x86_64.rpm'
state: present
+ when: ansible_pkg_mgr == "yum"
+
+- name: download and install td-agent
+ dnf:
+ name: 'http://packages.treasuredata.com/2/redhat/7/x86_64/td-agent-2.2.0-0.x86_64.rpm'
+ state: present
+ when: ansible_pkg_mgr == "dnf"
- name: Verify fluentd plugin installed
command: '/opt/td-agent/embedded/bin/gem query -i fluent-plugin-kubernetes'
diff --git a/roles/fluentd_node/tasks/main.yml b/roles/fluentd_node/tasks/main.yml
index f9ef30b83..827a1c075 100644
--- a/roles/fluentd_node/tasks/main.yml
+++ b/roles/fluentd_node/tasks/main.yml
@@ -4,6 +4,13 @@
yum:
name: 'http://packages.treasuredata.com/2/redhat/7/x86_64/td-agent-2.2.0-0.x86_64.rpm'
state: present
+ when: ansible_pkg_mgr == "yum"
+
+- name: download and install td-agent
+ dnf:
+ name: 'http://packages.treasuredata.com/2/redhat/7/x86_64/td-agent-2.2.0-0.x86_64.rpm'
+ state: present
+ when: ansible_pkg_mgr == "dnf"
- name: Verify fluentd plugin installed
command: '/opt/td-agent/embedded/bin/gem query -i fluent-plugin-kubernetes'
diff --git a/roles/haproxy/tasks/main.yml b/roles/haproxy/tasks/main.yml
index 5638b7313..5d015fadd 100644
--- a/roles/haproxy/tasks/main.yml
+++ b/roles/haproxy/tasks/main.yml
@@ -3,6 +3,13 @@
yum:
pkg: haproxy
state: present
+ when: ansible_pkg_mgr == "yum"
+
+- name: Install haproxy
+ dnf:
+ pkg: haproxy
+ state: present
+ when: ansible_pkg_mgr == "dnf"
- name: Configure haproxy
template:
diff --git a/roles/kube_nfs_volumes/tasks/main.yml b/roles/kube_nfs_volumes/tasks/main.yml
index d1dcf261a..3fcb9fd18 100644
--- a/roles/kube_nfs_volumes/tasks/main.yml
+++ b/roles/kube_nfs_volumes/tasks/main.yml
@@ -1,6 +1,11 @@
---
- name: Install pyparted (RedHat/Fedora)
yum: name=pyparted,python-httplib2 state=present
+ when: ansible_pkg_mgr == "yum"
+
+- name: Install pyparted (RedHat/Fedora)
+ dnf: name=pyparted,python-httplib2 state=present
+ when: ansible_pkg_mgr == "dnf"
- name: partition the drives
partitionpool: disks={{ disks }} force={{ force }} sizes={{ sizes }}
diff --git a/roles/kube_nfs_volumes/tasks/nfs.yml b/roles/kube_nfs_volumes/tasks/nfs.yml
index 559fcf17c..a58a7b824 100644
--- a/roles/kube_nfs_volumes/tasks/nfs.yml
+++ b/roles/kube_nfs_volumes/tasks/nfs.yml
@@ -1,6 +1,11 @@
---
- name: Install NFS server on Fedora/Red Hat
yum: name=nfs-utils state=present
+ when: ansible_pkg_mgr == "yum"
+
+- name: Install NFS server on Fedora/Red Hat
+ dnf: name=nfs-utils state=present
+ when: ansible_pkg_mgr == "dnf"
- name: Start rpcbind on Fedora/Red Hat
service: name=rpcbind state=started enabled=yes
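The etcd, flannel, fluentd, haproxy and kube_nfs_volumes hunks above all follow the same pattern: duplicate the install task and gate each copy on the ansible_pkg_mgr fact. A minimal sketch of the alternative, assuming an Ansible release that ships the generic package module (which this patch does not rely on), would collapse the pair into a single task:

- name: Install flannel
  sudo: true
  package: name=flannel state=present

Keeping explicit yum and dnf tasks, as the patch does, avoids that dependency and stays compatible with the Ansible versions targeted here.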
diff --git a/roles/lib_zabbix/library/zbx_action.py b/roles/lib_zabbix/library/zbx_action.py
index d64cebae1..24693e5db 100644
--- a/roles/lib_zabbix/library/zbx_action.py
+++ b/roles/lib_zabbix/library/zbx_action.py
@@ -89,6 +89,9 @@ def operation_differences(zabbix_ops, user_ops):
for zab, user in zip(zabbix_ops, user_ops):
for key, val in user.items():
if key == 'opconditions':
+ if len(zab[key]) != len(val):
+ rval[key] = val
+ break
for z_cond, u_cond in zip(zab[key], user[key]):
if not all([str(u_cond[op_key]) == z_cond[op_key] for op_key in \
['conditiontype', 'operator', 'value']]):
@@ -330,9 +333,9 @@ def get_action_operations(zapi, inc_operations):
condition['operator'] = 0
if condition['value'] == 'acknowledged':
- condition['operator'] = 1
+ condition['value'] = 1
else:
- condition['operator'] = 0
+ condition['value'] = 0
return inc_operations
diff --git a/roles/lib_zabbix/library/zbx_graph.py b/roles/lib_zabbix/library/zbx_graph.py
new file mode 100644
index 000000000..121ec3dee
--- /dev/null
+++ b/roles/lib_zabbix/library/zbx_graph.py
@@ -0,0 +1,331 @@
+#!/usr/bin/env python
+'''
+ Ansible module for zabbix graphs
+'''
+# vim: expandtab:tabstop=4:shiftwidth=4
+#
+# Zabbix graphs ansible module
+#
+#
+# Copyright 2015 Red Hat Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#---
+#- hosts: localhost
+# gather_facts: no
+# tasks:
+# - zbx_graph:
+# zbx_server: https://zabbixserver/zabbix/api_jsonrpc.php
+# zbx_user: Admin
+# zbx_password: zabbix
+# name: Test Graph
+# height: 300
+# width: 500
+# graph_items:
+# - item_name: openshift.master.etcd.create.fail
+# color: red
+# line_style: bold
+# - item_name: openshift.master.etcd.create.success
+# color: red
+# line_style: bold
+#
+#
+
+# This is in place because these modules look very similar to one another.
+# They need duplicate code because their behavior is very similar
+# but differs for each zabbix class.
+# pylint: disable=duplicate-code
+
+# pylint: disable=import-error
+from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection
+
+def exists(content, key='result'):
+ ''' Check that key exists in content and that content[key] is non-empty
+ '''
+ if not content.has_key(key):
+ return False
+
+ if not content[key]:
+ return False
+
+ return True
+
+def get_graph_type(graphtype):
+ '''
+ Possible values:
+ 0 - normal;
+ 1 - stacked;
+ 2 - pie;
+ 3 - exploded;
+ '''
+ gtype = 0
+ if 'stacked' in graphtype:
+ gtype = 1
+ elif 'pie' in graphtype:
+ gtype = 2
+ elif 'exploded' in graphtype:
+ gtype = 3
+
+ return gtype
+
+def get_show_legend(show_legend):
+ '''Get the value for show_legend
+ 0 - hide
+ 1 - (default) show
+ '''
+ rval = 1
+ if 'hide' == show_legend:
+ rval = 0
+
+ return rval
+
+def get_template_id(zapi, template_name):
+ '''
+ get related templates
+ '''
+ # Fetch templates by name
+ content = zapi.get_content('template',
+ 'get',
+ {'filter': {'host': template_name},})
+
+ if content.has_key('result'):
+ return content['result'][0]['templateid']
+
+ return None
+
+def get_color(color_in):
+ ''' Receive a color and translate it to a hex representation of the color
+
+ A few common colors are set up by default
+ '''
+ colors = {'black': '000000',
+ 'red': 'FF0000',
+ 'pink': 'FFC0CB',
+ 'purple': '800080',
+ 'orange': 'FFA500',
+ 'gold': 'FFD700',
+ 'yellow': 'FFFF00',
+ 'green': '008000',
+ 'cyan': '00FFFF',
+ 'aqua': '00FFFF',
+ 'blue': '0000FF',
+ 'brown': 'A52A2A',
+ 'gray': '808080',
+ 'grey': '808080',
+ 'silver': 'C0C0C0',
+ }
+ if colors.has_key(color_in):
+ return colors[color_in]
+
+ return color_in
+
+def get_line_style(style):
+ '''determine the line style
+ '''
+ line_style = {'line': 0,
+ 'filled': 1,
+ 'bold': 2,
+ 'dot': 3,
+ 'dashed': 4,
+ 'gradient': 5,
+ }
+
+ if line_style.has_key(style):
+ return line_style[style]
+
+ return 0
+
+def get_calc_function(func):
+ '''Determine the calculation function'''
+ rval = 2 # default to avg
+ if 'min' in func:
+ rval = 1
+ elif 'max' in func:
+ rval = 4
+ elif 'all' in func:
+ rval = 7
+ elif 'last' in func:
+ rval = 9
+
+ return rval
+
+def get_graph_item_type(gtype):
+ '''Determine the graph item type
+ '''
+ rval = 0 # simple graph type
+ if 'sum' in gtype:
+ rval = 2
+
+ return rval
+
+def get_graph_items(zapi, gitems):
+ '''Get graph items by id'''
+
+ r_items = []
+ for item in gitems:
+ content = zapi.get_content('item',
+ 'get',
+ {'filter': {'name': item['item_name']}})
+ _ = item.pop('item_name')
+ color = get_color(item.pop('color'))
+ drawtype = get_line_style(item.get('line_style', 'line'))
+ func = get_calc_function(item.get('calc_func', 'avg'))
+ g_type = get_graph_item_type(item.get('graph_item_type', 'simple'))
+
+ if content.has_key('result'):
+ tmp = {'itemid': content['result'][0]['itemid'],
+ 'color': color,
+ 'drawtype': drawtype,
+ 'calc_fnc': func,
+ 'type': g_type,
+ }
+ r_items.append(tmp)
+
+ return r_items
+
+def compare_gitems(zabbix_items, user_items):
+ '''Compare zabbix results with the user's supplied items
+ return True if the user items match the zabbix items
+ return False if any of the values differ
+ '''
+ if len(zabbix_items) != len(user_items):
+ return False
+
+ for u_item in user_items:
+ for z_item in zabbix_items:
+ if u_item['itemid'] == z_item['itemid']:
+ if not all([str(value) == z_item[key] for key, value in u_item.items()]):
+ return False
+
+ return True
+
+# The branches are needed for CRUD and error handling
+# pylint: disable=too-many-branches
+def main():
+ '''
+ ansible zabbix module for zbx_graphs
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
+ zbx_user=dict(default=os.environ.get('ZABBIX_USER', None), type='str'),
+ zbx_password=dict(default=os.environ.get('ZABBIX_PASSWORD', None), type='str'),
+ zbx_debug=dict(default=False, type='bool'),
+ name=dict(default=None, type='str'),
+ height=dict(default=None, type='int'),
+ width=dict(default=None, type='int'),
+ graph_type=dict(default='normal', type='str'),
+ show_legend=dict(default='show', type='str'),
+ state=dict(default='present', type='str'),
+ graph_items=dict(default=None, type='list'),
+ ),
+ #supports_check_mode=True
+ )
+
+ zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'],
+ module.params['zbx_user'],
+ module.params['zbx_password'],
+ module.params['zbx_debug']))
+
+ #Set the instance and the template for the rest of the calls
+ zbx_class_name = 'graph'
+ state = module.params['state']
+
+ content = zapi.get_content(zbx_class_name,
+ 'get',
+ {'filter': {'name': module.params['name']},
+ #'templateids': templateid,
+ 'selectGraphItems': 'extend',
+ })
+
+ #******#
+ # GET
+ #******#
+ if state == 'list':
+ module.exit_json(changed=False, results=content['result'], state="list")
+
+ #******#
+ # DELETE
+ #******#
+ if state == 'absent':
+ if not exists(content):
+ module.exit_json(changed=False, state="absent")
+
+ content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0]['graphid']])
+ module.exit_json(changed=True, results=content['result'], state="absent")
+
+ # Create and Update
+ if state == 'present':
+
+ params = {'name': module.params['name'],
+ 'height': module.params['height'],
+ 'width': module.params['width'],
+ 'graphtype': get_graph_type(module.params['graph_type']),
+ 'show_legend': get_show_legend(module.params['show_legend']),
+ 'gitems': get_graph_items(zapi, module.params['graph_items']),
+ }
+
+ # Remove any None valued params
+ _ = [params.pop(key, None) for key in params.keys() if params[key] is None]
+
+ #******#
+ # CREATE
+ #******#
+ if not exists(content):
+ content = zapi.get_content(zbx_class_name, 'create', params)
+
+ if content.has_key('error'):
+ module.exit_json(failed=True, changed=True, results=content['error'], state="present")
+
+ module.exit_json(changed=True, results=content['result'], state='present')
+
+
+ ########
+ # UPDATE
+ ########
+ differences = {}
+ zab_results = content['result'][0]
+ for key, value in params.items():
+
+ if key == 'gitems':
+ if not compare_gitems(zab_results[key], value):
+ differences[key] = value
+
+ elif zab_results[key] != value and zab_results[key] != str(value):
+ differences[key] = value
+
+ if not differences:
+ module.exit_json(changed=False, results=zab_results, state="present")
+
+ # We have differences and need to update
+ differences['graphid'] = zab_results['graphid']
+ content = zapi.get_content(zbx_class_name, 'update', differences)
+
+ if content.has_key('error'):
+ module.exit_json(failed=True, changed=False, results=content['error'], state="present")
+
+ module.exit_json(changed=True, results=content['result'], state="present")
+
+ module.exit_json(failed=True,
+ changed=False,
+ results='Unknown state passed. %s' % state,
+ state="unknown")
+
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
+# import module snippets. These are required
+from ansible.module_utils.basic import *
+
+main()
diff --git a/roles/lib_zabbix/library/zbx_graphprototype.py b/roles/lib_zabbix/library/zbx_graphprototype.py
new file mode 100644
index 000000000..8287c1e2d
--- /dev/null
+++ b/roles/lib_zabbix/library/zbx_graphprototype.py
@@ -0,0 +1,331 @@
+#!/usr/bin/env python
+'''
+ Ansible module for zabbix graphprototypes
+'''
+# vim: expandtab:tabstop=4:shiftwidth=4
+#
+# Zabbix graphprototypes ansible module
+#
+#
+# Copyright 2015 Red Hat Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#---
+#- hosts: localhost
+# gather_facts: no
+# tasks:
+# - zbx_graphprototype:
+# zbx_server: https://zabbixserver/zabbix/api_jsonrpc.php
+# zbx_user: Admin
+# zbx_password: zabbix
+# name: Test Graph
+# height: 300
+# width: 500
+# graph_items:
+# - item_name: Bytes per second IN on network interface {#OSO_NET_INTERFACE}
+# color: red
+# line_style: bold
+# item_type: prototype
+# - item_name: Template OS Linux: Bytes per second OUT on network interface {#OSO_NET_INTERFACE}
+# item_type: prototype
+#
+#
+
+# This is in place because these modules look very similar to one another.
+# They need duplicate code because their behavior is very similar
+# but differs for each zabbix class.
+# pylint: disable=duplicate-code
+
+# pylint: disable=import-error
+from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection
+
+def exists(content, key='result'):
+ ''' Check that key exists in content and that content[key] is non-empty
+ '''
+ if not content.has_key(key):
+ return False
+
+ if not content[key]:
+ return False
+
+ return True
+
+def get_graph_type(graphtype):
+ '''
+ Possible values:
+ 0 - normal;
+ 1 - stacked;
+ 2 - pie;
+ 3 - exploded;
+ '''
+ gtype = 0
+ if 'stacked' in graphtype:
+ gtype = 1
+ elif 'pie' in graphtype:
+ gtype = 2
+ elif 'exploded' in graphtype:
+ gtype = 3
+
+ return gtype
+
+def get_show_legend(show_legend):
+ '''Get the value for show_legend
+ 0 - hide
+ 1 - (default) show
+ '''
+ rval = 1
+ if 'hide' == show_legend:
+ rval = 0
+
+ return rval
+
+def get_template_id(zapi, template_name):
+ '''
+ get related templates
+ '''
+ # Fetch templates by name
+ content = zapi.get_content('template',
+ 'get',
+ {'filter': {'host': template_name},})
+
+ if content.has_key('result'):
+ return content['result'][0]['templateid']
+
+ return None
+
+def get_color(color_in='black'):
+ ''' Receive a color and translate it to a hex representation of the color
+
+ A few common colors are set up by default
+ '''
+ colors = {'black': '000000',
+ 'red': 'FF0000',
+ 'pink': 'FFC0CB',
+ 'purple': '800080',
+ 'orange': 'FFA500',
+ 'gold': 'FFD700',
+ 'yellow': 'FFFF00',
+ 'green': '008000',
+ 'cyan': '00FFFF',
+ 'aqua': '00FFFF',
+ 'blue': '0000FF',
+ 'brown': 'A52A2A',
+ 'gray': '808080',
+ 'grey': '808080',
+ 'silver': 'C0C0C0',
+ }
+ if colors.has_key(color_in):
+ return colors[color_in]
+
+ return color_in
+
+def get_line_style(style):
+ '''determine the line style
+ '''
+ line_style = {'line': 0,
+ 'filled': 1,
+ 'bold': 2,
+ 'dot': 3,
+ 'dashed': 4,
+ 'gradient': 5,
+ }
+
+ if line_style.has_key(style):
+ return line_style[style]
+
+ return 0
+
+def get_calc_function(func):
+ '''Determine the calculation function'''
+ rval = 2 # default to avg
+ if 'min' in func:
+ rval = 1
+ elif 'max' in func:
+ rval = 4
+ elif 'all' in func:
+ rval = 7
+ elif 'last' in func:
+ rval = 9
+
+ return rval
+
+def get_graph_item_type(gtype):
+ '''Determine the graph item type
+ '''
+ rval = 0 # simple graph type
+ if 'sum' in gtype:
+ rval = 2
+
+ return rval
+
+def get_graph_items(zapi, gitems):
+ '''Get graph items by id'''
+
+ r_items = []
+ for item in gitems:
+ content = zapi.get_content('item%s' % item.get('item_type', ''),
+ 'get',
+ {'filter': {'name': item['item_name']}})
+ _ = item.pop('item_name')
+ color = get_color(item.pop('color', 'black'))
+ drawtype = get_line_style(item.get('line_style', 'line'))
+ func = get_calc_function(item.get('calc_func', 'avg'))
+ g_type = get_graph_item_type(item.get('graph_item_type', 'simple'))
+
+ if content.has_key('result'):
+ tmp = {'itemid': content['result'][0]['itemid'],
+ 'color': color,
+ 'drawtype': drawtype,
+ 'calc_fnc': func,
+ 'type': g_type,
+ }
+ r_items.append(tmp)
+
+ return r_items
+
+def compare_gitems(zabbix_items, user_items):
+ '''Compare zabbix results with the user's supplied items
+ return True if the user items match the zabbix items
+ return False if any of the values differ
+ '''
+ if len(zabbix_items) != len(user_items):
+ return False
+
+ for u_item in user_items:
+ for z_item in zabbix_items:
+ if u_item['itemid'] == z_item['itemid']:
+ if not all([str(value) == z_item[key] for key, value in u_item.items()]):
+ return False
+
+ return True
+
+# The branches are needed for CRUD and error handling
+# pylint: disable=too-many-branches
+def main():
+ '''
+ ansible zabbix module for zbx_graphprototypes
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
+ zbx_user=dict(default=os.environ.get('ZABBIX_USER', None), type='str'),
+ zbx_password=dict(default=os.environ.get('ZABBIX_PASSWORD', None), type='str'),
+ zbx_debug=dict(default=False, type='bool'),
+ name=dict(default=None, type='str'),
+ height=dict(default=None, type='int'),
+ width=dict(default=None, type='int'),
+ graph_type=dict(default='normal', type='str'),
+ show_legend=dict(default='show', type='str'),
+ state=dict(default='present', type='str'),
+ graph_items=dict(default=None, type='list'),
+ ),
+ #supports_check_mode=True
+ )
+
+ zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'],
+ module.params['zbx_user'],
+ module.params['zbx_password'],
+ module.params['zbx_debug']))
+
+ #Set the instance and the template for the rest of the calls
+ zbx_class_name = 'graphprototype'
+ state = module.params['state']
+
+ content = zapi.get_content(zbx_class_name,
+ 'get',
+ {'filter': {'name': module.params['name']},
+ #'templateids': templateid,
+ 'selectGraphItems': 'extend',
+ })
+
+ #******#
+ # GET
+ #******#
+ if state == 'list':
+ module.exit_json(changed=False, results=content['result'], state="list")
+
+ #******#
+ # DELETE
+ #******#
+ if state == 'absent':
+ if not exists(content):
+ module.exit_json(changed=False, state="absent")
+
+ content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0]['graphid']])
+ module.exit_json(changed=True, results=content['result'], state="absent")
+
+ # Create and Update
+ if state == 'present':
+
+ params = {'name': module.params['name'],
+ 'height': module.params['height'],
+ 'width': module.params['width'],
+ 'graphtype': get_graph_type(module.params['graph_type']),
+ 'show_legend': get_show_legend(module.params['show_legend']),
+ 'gitems': get_graph_items(zapi, module.params['graph_items']),
+ }
+
+ # Remove any None valued params
+ _ = [params.pop(key, None) for key in params.keys() if params[key] is None]
+
+ #******#
+ # CREATE
+ #******#
+ if not exists(content):
+ content = zapi.get_content(zbx_class_name, 'create', params)
+
+ if content.has_key('error'):
+ module.exit_json(failed=True, changed=True, results=content['error'], state="present")
+
+ module.exit_json(changed=True, results=content['result'], state='present')
+
+
+ ########
+ # UPDATE
+ ########
+ differences = {}
+ zab_results = content['result'][0]
+ for key, value in params.items():
+
+ if key == 'gitems':
+ if not compare_gitems(zab_results[key], value):
+ differences[key] = value
+
+ elif zab_results[key] != value and zab_results[key] != str(value):
+ differences[key] = value
+
+ if not differences:
+ module.exit_json(changed=False, results=zab_results, state="present")
+
+ # We have differences and need to update
+ differences['graphid'] = zab_results['graphid']
+ content = zapi.get_content(zbx_class_name, 'update', differences)
+
+ if content.has_key('error'):
+ module.exit_json(failed=True, changed=False, results=content['error'], state="present")
+
+ module.exit_json(changed=True, results=content['result'], state="present")
+
+ module.exit_json(failed=True,
+ changed=False,
+ results='Unknown state passed. %s' % state,
+ state="unknown")
+
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
+# import module snippets. These are required
+from ansible.module_utils.basic import *
+
+main()
diff --git a/roles/lib_zabbix/library/zbx_httptest.py b/roles/lib_zabbix/library/zbx_httptest.py
new file mode 100644
index 000000000..6b28117ad
--- /dev/null
+++ b/roles/lib_zabbix/library/zbx_httptest.py
@@ -0,0 +1,290 @@
+#!/usr/bin/env python
+'''
+ Ansible module for zabbix httpservice
+'''
+# vim: expandtab:tabstop=4:shiftwidth=4
+#
+# Zabbix httptest ansible module
+#
+#
+# Copyright 2015 Red Hat Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# This is in place because these modules look very similar to one another.
+# They need duplicate code because their behavior is very similar
+# but differs for each zabbix class.
+# pylint: disable=duplicate-code
+
+# pylint: disable=import-error
+from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection
+
+def exists(content, key='result'):
+ ''' Check that key exists in content and that content[key] is non-empty
+ '''
+ if not content.has_key(key):
+ return False
+
+ if not content[key]:
+ return False
+
+ return True
+
+def get_authentication_method(auth):
+ ''' determine authentication type'''
+ rval = 0
+ if 'basic' in auth:
+ rval = 1
+ elif 'ntlm' in auth:
+ rval = 2
+
+ return rval
+
+def get_verify_host(verify):
+ '''
+ get the value for verify_host
+ '''
+ if verify:
+ return 1
+
+ return 0
+
+def get_app_id(zapi, application):
+ '''
+ get the application id for a given application name
+ '''
+ # Fetch applications by name
+ content = zapi.get_content('application',
+ 'get',
+ {'search': {'name': application},
+ 'selectApplications': ['applicationid', 'name']})
+ if content.has_key('result'):
+ return content['result'][0]['applicationid']
+
+ return None
+
+def get_template_id(zapi, template_name):
+ '''
+ get related templates
+ '''
+ # Fetch templates by name
+ content = zapi.get_content('template',
+ 'get',
+ {'search': {'host': template_name},
+ 'selectApplications': ['applicationid', 'name']})
+ if content.has_key('result'):
+ return content['result'][0]['templateid']
+
+ return None
+
+def get_host_id_by_name(zapi, host_name):
+ '''Get host id by name'''
+ content = zapi.get_content('host',
+ 'get',
+ {'filter': {'name': host_name}})
+
+ return content['result'][0]['hostid']
+
+def get_status(status):
+ ''' Determine the status of the web scenario '''
+ rval = 0
+ if 'disabled' in status:
+ return 1
+
+ return rval
+
+def find_step(idx, step_list):
+ ''' find step by index '''
+ for step in step_list:
+ if str(step['no']) == str(idx):
+ return step
+
+ return None
+
+def steps_equal(zab_steps, user_steps):
+ '''compare steps returned from zabbix
+ and steps passed from user
+ '''
+
+ if len(user_steps) != len(zab_steps):
+ return False
+
+ for idx in range(1, len(user_steps)+1):
+
+ user = find_step(idx, user_steps)
+ zab = find_step(idx, zab_steps)
+
+ for key, value in user.items():
+ if str(value) != str(zab[key]):
+ return False
+
+ return True
+
+def process_steps(steps):
+ '''Preprocess the step parameters'''
+ for idx, step in enumerate(steps):
+ if not step.has_key('no'):
+ step['no'] = idx + 1
+
+ return steps
+
+# The branches are needed for CRUD and error handling
+# pylint: disable=too-many-branches
+def main():
+ '''
+ ansible zabbix module for zbx_httptest
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
+ zbx_user=dict(default=os.environ.get('ZABBIX_USER', None), type='str'),
+ zbx_password=dict(default=os.environ.get('ZABBIX_PASSWORD', None), type='str'),
+ zbx_debug=dict(default=False, type='bool'),
+ name=dict(default=None, required=True, type='str'),
+ agent=dict(default=None, type='str'),
+ template_name=dict(default=None, type='str'),
+ host_name=dict(default=None, type='str'),
+ interval=dict(default=60, type='int'),
+ application=dict(default=None, type='str'),
+ authentication=dict(default=None, type='str'),
+ http_user=dict(default=None, type='str'),
+ http_password=dict(default=None, type='str'),
+ state=dict(default='present', type='str'),
+ status=dict(default='enabled', type='str'),
+ steps=dict(default='present', type='list'),
+ verify_host=dict(default=False, type='bool'),
+ retries=dict(default=1, type='int'),
+ headers=dict(default=None, type='dict'),
+ query_type=dict(default='filter', choices=['filter', 'search'], type='str'),
+ ),
+ #supports_check_mode=True
+ mutually_exclusive=[['template_name', 'host_name']],
+ )
+
+ zapi = ZabbixAPI(ZabbixConnection(module.params['zbx_server'],
+ module.params['zbx_user'],
+ module.params['zbx_password'],
+ module.params['zbx_debug']))
+
+ #Set the instance and the template for the rest of the calls
+ zbx_class_name = 'httptest'
+ state = module.params['state']
+ hostid = None
+
+ # If a template name was passed then accept the template
+ if module.params['template_name']:
+ hostid = get_template_id(zapi, module.params['template_name'])
+ else:
+ hostid = get_host_id_by_name(zapi, module.params['host_name'])
+
+ # Fail if a template was not found matching the name
+ if not hostid:
+ module.exit_json(failed=True,
+ changed=False,
+ results='Error: Could not find template or host with name [%s].' %
+ (module.params.get('template_name', module.params['host_name'])),
+ state="Unknown")
+
+ content = zapi.get_content(zbx_class_name,
+ 'get',
+ {module.params['query_type']: {'name': module.params['name']},
+ 'selectSteps': 'extend',
+ })
+
+ #******#
+ # GET
+ #******#
+ if state == 'list':
+ module.exit_json(changed=False, results=content['result'], state="list")
+
+ #******#
+ # DELETE
+ #******#
+ if state == 'absent':
+ if not exists(content):
+ module.exit_json(changed=False, state="absent")
+
+ content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0]['httptestid']])
+ module.exit_json(changed=True, results=content['result'], state="absent")
+
+ # Create and Update
+ if state == 'present':
+
+ params = {'name': module.params['name'],
+ 'hostid': hostid,
+ 'agent': module.params['agent'],
+ 'retries': module.params['retries'],
+ 'steps': process_steps(module.params['steps']),
+ 'applicationid': get_app_id(zapi, module.params['application']),
+ 'delay': module.params['interval'],
+ 'verify_host': get_verify_host(module.params['verify_host']),
+ 'status': get_status(module.params['status']),
+ 'headers': module.params['headers'],
+ 'http_user': module.params['http_user'],
+ 'http_password': module.params['http_password'],
+ }
+
+
+ # Remove any None valued params
+ _ = [params.pop(key, None) for key in params.keys() if params[key] is None]
+
+ #******#
+ # CREATE
+ #******#
+ if not exists(content):
+ content = zapi.get_content(zbx_class_name, 'create', params)
+
+ if content.has_key('error'):
+ module.exit_json(failed=True, changed=True, results=content['error'], state="present")
+
+ module.exit_json(changed=True, results=content['result'], state='present')
+
+
+ ########
+ # UPDATE
+ ########
+ differences = {}
+ zab_results = content['result'][0]
+ for key, value in params.items():
+
+ if key == 'steps':
+ if not steps_equal(zab_results[key], value):
+ differences[key] = value
+
+ elif zab_results[key] != value and zab_results[key] != str(value):
+ differences[key] = value
+
+ # We have differences and need to update
+ if not differences:
+ module.exit_json(changed=False, results=zab_results, state="present")
+
+ differences['httptestid'] = zab_results['httptestid']
+ content = zapi.get_content(zbx_class_name, 'update', differences)
+
+ if content.has_key('error'):
+ module.exit_json(failed=True, changed=False, results=content['error'], state="present")
+
+ module.exit_json(changed=True, results=content['result'], state="present")
+
+ module.exit_json(failed=True,
+ changed=False,
+ results='Unknown state passed. %s' % state,
+ state="unknown")
+
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
+# import module snippets. These are required
+from ansible.module_utils.basic import *
+
+main()
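Unlike the two graph modules, zbx_httptest ships without an example play in its header. A minimal usage sketch, with the scenario, template, application and step values invented for illustration, could look like:

- zbx_httptest:
    zbx_server: https://zabbixserver/zabbix/api_jsonrpc.php
    zbx_user: Admin
    zbx_password: zabbix
    name: Example Web Scenario
    template_name: Example Template
    application: Example App
    interval: 60
    steps:
    - name: front page
      url: https://example.com/
      status_codes: 200

Steps may omit the no field; process_steps assigns it from the list position.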
diff --git a/roles/lib_zabbix/library/zbx_item.py b/roles/lib_zabbix/library/zbx_item.py
index 5dc3cff9b..996c98fae 100644
--- a/roles/lib_zabbix/library/zbx_item.py
+++ b/roles/lib_zabbix/library/zbx_item.py
@@ -41,6 +41,24 @@ def exists(content, key='result'):
return True
+def get_data_type(data_type):
+ '''
+ Possible values:
+ 0 - decimal;
+ 1 - octal;
+ 2 - hexadecimal;
+ 3 - bool;
+ '''
+ vtype = 0
+ if 'octal' in data_type:
+ vtype = 1
+ elif 'hexadecimal' in data_type:
+ vtype = 2
+ elif 'bool' in data_type:
+ vtype = 3
+
+ return vtype
+
def get_value_type(value_type):
'''
Possible values:
@@ -158,6 +176,7 @@ def main():
template_name=dict(default=None, type='str'),
zabbix_type=dict(default='trapper', type='str'),
value_type=dict(default='int', type='str'),
+ data_type=dict(default='decimal', type='str'),
interval=dict(default=60, type='int'),
delta=dict(default=0, type='int'),
multiplier=dict(default=None, type='str'),
@@ -219,6 +238,7 @@ def main():
'hostid': templateid[0],
'type': get_zabbix_type(module.params['zabbix_type']),
'value_type': get_value_type(module.params['value_type']),
+ 'data_type': get_data_type(module.params['data_type']),
'applications': get_app_ids(module.params['applications'], app_name_ids),
'formula': formula,
'multiplier': use_multiplier,
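The new data_type parameter maps decimal, octal, hexadecimal and bool onto the Zabbix API values 0-3 and defaults to decimal. A hedged sketch of a template item definition that exercises it (key and application names are placeholders) might be:

zitems:
- key: example.status.flags
  applications:
  - Example App
  value_type: int
  data_type: hexadecimal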
diff --git a/roles/lib_zabbix/library/zbx_itemprototype.py b/roles/lib_zabbix/library/zbx_itemprototype.py
index 43498c015..aca9c8336 100644
--- a/roles/lib_zabbix/library/zbx_itemprototype.py
+++ b/roles/lib_zabbix/library/zbx_itemprototype.py
@@ -116,6 +116,24 @@ def get_zabbix_type(ztype):
return _vtype
+def get_data_type(data_type):
+ '''
+ Possible values:
+ 0 - decimal;
+ 1 - octal;
+ 2 - hexadecimal;
+ 3 - bool;
+ '''
+ vtype = 0
+ if 'octal' in data_type:
+ vtype = 1
+ elif 'hexadecimal' in data_type:
+ vtype = 2
+ elif 'bool' in data_type:
+ vtype = 3
+
+ return vtype
+
def get_value_type(value_type):
'''
Possible values:
@@ -175,6 +193,7 @@ def main():
interfaceid=dict(default=None, type='int'),
zabbix_type=dict(default='trapper', type='str'),
value_type=dict(default='float', type='str'),
+ data_type=dict(default='decimal', type='str'),
delay=dict(default=60, type='int'),
lifetime=dict(default=30, type='int'),
state=dict(default='present', type='str'),
@@ -238,6 +257,7 @@ def main():
'ruleid': get_rule_id(zapi, module.params['discoveryrule_key'], template['templateid']),
'type': get_zabbix_type(module.params['zabbix_type']),
'value_type': get_value_type(module.params['value_type']),
+ 'data_type': get_data_type(module.params['data_type']),
'applications': get_app_ids(zapi, module.params['applications'], template['templateid']),
'formula': formula,
'multiplier': use_multiplier,
diff --git a/roles/lib_zabbix/library/zbx_usergroup.py b/roles/lib_zabbix/library/zbx_usergroup.py
index 297d8ef91..3fd44d80c 100644
--- a/roles/lib_zabbix/library/zbx_usergroup.py
+++ b/roles/lib_zabbix/library/zbx_usergroup.py
@@ -27,6 +27,10 @@ zabbix ansible module for usergroups
# but different for each zabbix class.
# pylint: disable=duplicate-code
+# Disabling too-many-branches as we need the error checking and the if-statements
+# to determine the proper state
+# pylint: disable=too-many-branches
+
# pylint: disable=import-error
from openshift_tools.monitoring.zbxapi import ZabbixAPI, ZabbixConnection
@@ -92,26 +96,24 @@ def get_user_status(status):
return 1
-#def get_userids(zapi, users):
-# ''' Get userids from user aliases
-# '''
-# if not users:
-# return None
-#
-# userids = []
-# for alias in users:
-# content = zapi.get_content('user', 'get', {'search': {'alias': alias}})
-# if content['result']:
-# userids.append(content['result'][0]['userid'])
-#
-# return userids
+def get_userids(zapi, users):
+ ''' Get userids from user aliases
+ '''
+ if not users:
+ return None
+
+ userids = []
+ for alias in users:
+ content = zapi.get_content('user', 'get', {'search': {'alias': alias}})
+ if content['result']:
+ userids.append(content['result'][0]['userid'])
+
+ return userids
def main():
''' Ansible module for usergroup
'''
- ##def usergroup(self, name, rights=None, users=None, state='present', params=None):
-
module = AnsibleModule(
argument_spec=dict(
zbx_server=dict(default='https://localhost/zabbix/api_jsonrpc.php', type='str'),
@@ -123,7 +125,7 @@ def main():
status=dict(default='enabled', type='str'),
name=dict(default=None, type='str', required=True),
rights=dict(default=None, type='list'),
- #users=dict(default=None, type='list'),
+ users=dict(default=None, type='list'),
state=dict(default='present', type='str'),
),
#supports_check_mode=True
@@ -144,9 +146,15 @@ def main():
{'search': {'name': uname},
'selectUsers': 'userid',
})
+ #******#
+ # GET
+ #******#
if state == 'list':
module.exit_json(changed=False, results=content['result'], state="list")
+ #******#
+ # DELETE
+ #******#
if state == 'absent':
if not exists(content):
module.exit_json(changed=False, state="absent")
@@ -157,6 +165,7 @@ def main():
content = zapi.get_content(zbx_class_name, 'delete', [content['result'][0][idname]])
module.exit_json(changed=True, results=content['result'], state="absent")
+ # Create and Update
if state == 'present':
params = {'name': uname,
@@ -164,26 +173,37 @@ def main():
'users_status': get_user_status(module.params['status']),
'gui_access': get_gui_access(module.params['gui_access']),
'debug_mode': get_debug_mode(module.params['debug_mode']),
- #'userids': get_userids(zapi, module.params['users']),
+ 'userids': get_userids(zapi, module.params['users']),
}
+ # Remove any None valued params
_ = [params.pop(key, None) for key in params.keys() if params[key] == None]
+ #******#
+ # CREATE
+ #******#
if not exists(content):
# if we didn't find it, create it
content = zapi.get_content(zbx_class_name, 'create', params)
+
+ if content.has_key('error'):
+ module.exit_json(failed=True, changed=True, results=content['error'], state="present")
+
module.exit_json(changed=True, results=content['result'], state='present')
- # already exists, we need to update it
- # let's compare properties
+
+
+ ########
+ # UPDATE
+ ########
differences = {}
zab_results = content['result'][0]
for key, value in params.items():
if key == 'rights':
differences['rights'] = value
- #elif key == 'userids' and zab_results.has_key('users'):
- #if zab_results['users'] != value:
- #differences['userids'] = value
+ elif key == 'userids' and zab_results.has_key('users'):
+ if zab_results['users'] != value:
+ differences['userids'] = value
elif zab_results[key] != value and zab_results[key] != str(value):
differences[key] = value
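With get_userids and the users parameter re-enabled, group membership can be driven from a play alongside rights. A small sketch (the group name and aliases are made up):

- zbx_usergroup:
    zbx_server: https://zabbixserver/zabbix/api_jsonrpc.php
    zbx_user: Admin
    zbx_password: zabbix
    name: Example Operators
    users:
    - jdoe
    - asmith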
diff --git a/roles/lib_zabbix/tasks/create_template.yml b/roles/lib_zabbix/tasks/create_template.yml
index 44c4e6766..47749389e 100644
--- a/roles/lib_zabbix/tasks/create_template.yml
+++ b/roles/lib_zabbix/tasks/create_template.yml
@@ -33,6 +33,7 @@
key: "{{ item.key }}"
name: "{{ item.name | default(item.key, true) }}"
value_type: "{{ item.value_type | default('int') }}"
+ data_type: "{{ item.data_type | default('decimal') }}"
description: "{{ item.description | default('', True) }}"
multiplier: "{{ item.multiplier | default('', True) }}"
units: "{{ item.units | default('', True) }}"
@@ -81,6 +82,7 @@
key: "{{ item.key }}"
discoveryrule_key: "{{ item.discoveryrule_key }}"
value_type: "{{ item.value_type }}"
+ data_type: "{{ item.data_type | default('decimal') }}"
template_name: "{{ template.name }}"
applications: "{{ item.applications }}"
description: "{{ item.description | default('', True) }}"
@@ -103,3 +105,27 @@
description: "{{ item.description | default('', True) }}"
with_items: template.ztriggerprototypes
when: template.ztriggerprototypes is defined
+
+- name: Create Graphs
+ zbx_graph:
+ zbx_server: "{{ server }}"
+ zbx_user: "{{ user }}"
+ zbx_password: "{{ password }}"
+ name: "{{ item.name }}"
+ height: "{{ item.height }}"
+ width: "{{ item.width }}"
+ graph_items: "{{ item.graph_items }}"
+ with_items: template.zgraphs
+ when: template.zgraphs is defined
+
+- name: Create Graph Prototypes
+ zbx_graphprototype:
+ zbx_server: "{{ server }}"
+ zbx_user: "{{ user }}"
+ zbx_password: "{{ password }}"
+ name: "{{ item.name }}"
+ height: "{{ item.height }}"
+ width: "{{ item.width }}"
+ graph_items: "{{ item.graph_items }}"
+ with_items: template.zgraphprototypes
+ when: template.zgraphprototypes is defined
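Both new tasks expect template vars shaped like the module parameters. A hedged example of a zgraphs entry (item names taken from the zbx_graph header example):

zgraphs:
- name: Example etcd create operations
  height: 300
  width: 500
  graph_items:
  - item_name: openshift.master.etcd.create.success
    color: green
  - item_name: openshift.master.etcd.create.fail
    color: red
    line_style: bold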
diff --git a/roles/openshift_ansible_inventory/tasks/main.yml b/roles/openshift_ansible_inventory/tasks/main.yml
index f6919dada..2b99f8bcd 100644
--- a/roles/openshift_ansible_inventory/tasks/main.yml
+++ b/roles/openshift_ansible_inventory/tasks/main.yml
@@ -2,6 +2,16 @@
- yum:
name: "{{ item }}"
state: present
+ when: ansible_pkg_mgr == "yum"
+ with_items:
+ - openshift-ansible-inventory
+ - openshift-ansible-inventory-aws
+ - openshift-ansible-inventory-gce
+
+- dnf:
+ name: "{{ item }}"
+ state: present
+ when: ansible_pkg_mgr == "dnf"
with_items:
- openshift-ansible-inventory
- openshift-ansible-inventory-aws
diff --git a/roles/openshift_cluster_metrics/tasks/main.yml b/roles/openshift_cluster_metrics/tasks/main.yml
index 3938aba4c..9b7735e54 100644
--- a/roles/openshift_cluster_metrics/tasks/main.yml
+++ b/roles/openshift_cluster_metrics/tasks/main.yml
@@ -7,7 +7,7 @@
- name: Create InfluxDB Services
command: >
- {{ openshift.common.client_binary }} create -f
+ {{ openshift.common.client_binary }} create -f
/etc/openshift/cluster-metrics/influxdb.yaml
register: oex_influxdb_services
failed_when: "'already exists' not in oex_influxdb_services.stderr and oex_influxdb_services.rc != 0"
@@ -15,14 +15,14 @@
- name: Create Heapster Service Account
command: >
- {{ openshift.common.client_binary }} create -f
+ {{ openshift.common.client_binary }} create -f
/etc/openshift/cluster-metrics/heapster-serviceaccount.yaml
register: oex_heapster_serviceaccount
failed_when: "'already exists' not in oex_heapster_serviceaccount.stderr and oex_heapster_serviceaccount.rc != 0"
changed_when: false
- name: Add cluster-reader role to Heapster
- command: >
+ command: >
{{ openshift.common.admin_binary }} policy
add-cluster-role-to-user
cluster-reader
diff --git a/roles/openshift_common/tasks/main.yml b/roles/openshift_common/tasks/main.yml
index 38d5a08e4..c0982290d 100644
--- a/roles/openshift_common/tasks/main.yml
+++ b/roles/openshift_common/tasks/main.yml
@@ -3,6 +3,10 @@
msg: Flannel can not be used with openshift sdn
when: openshift_use_openshift_sdn | default(false) | bool and openshift_use_flannel | default(false) | bool
+- fail:
+ msg: openshift_hostname must be 64 characters or less
+ when: openshift_hostname is defined and openshift_hostname | length > 64
+
- name: Set common Cluster facts
openshift_facts:
role: common
@@ -18,6 +22,13 @@
deployment_type: "{{ openshift_deployment_type }}"
use_fluentd: "{{ openshift_use_fluentd | default(None) }}"
use_flannel: "{{ openshift_use_flannel | default(None) }}"
+ use_manageiq: "{{ openshift_use_manageiq | default(None) }}"
+
+ # For enterprise versions < 3.1 and origin versions < 1.1 we want to set the
+ # hostname by default.
+- set_fact:
+ set_hostname_default: "{{ not openshift.common.version_greater_than_3_1_or_1_1 }}"
- name: Set hostname
hostname: name={{ openshift.common.hostname }}
+ when: openshift_set_hostname | default(set_hostname_default) | bool
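Setting the hostname is now opt-in for enterprise 3.1 / origin 1.1 and later; deployments that still want Ansible to manage it can force the behavior from the inventory. A minimal sketch, with the group vars file location left to the inventory layout:

# group vars for the cluster hosts
openshift_set_hostname: true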
diff --git a/roles/openshift_examples/defaults/main.yml b/roles/openshift_examples/defaults/main.yml
index 8e8bc6868..0bc5d7750 100644
--- a/roles/openshift_examples/defaults/main.yml
+++ b/roles/openshift_examples/defaults/main.yml
@@ -6,7 +6,9 @@ openshift_examples_load_db_templates: true
openshift_examples_load_xpaas: "{{ openshift_deployment_type in ['enterprise','openshift-enterprise','atomic-enterprise','online'] }}"
openshift_examples_load_quickstarts: true
-examples_base: /usr/share/openshift/examples
+content_version: "{{ 'v1.1' if openshift.common.version_greater_than_3_1_or_1_1 else 'v1.0' }}"
+
+examples_base: "/usr/share/openshift/examples"
image_streams_base: "{{ examples_base }}/image-streams"
centos_image_streams: "{{ image_streams_base}}/image-streams-centos7.json"
rhel_image_streams: "{{ image_streams_base}}/image-streams-rhel7.json"
diff --git a/roles/openshift_examples/examples-sync.sh b/roles/openshift_examples/examples-sync.sh
index a261a6ddd..090fb9042 100755
--- a/roles/openshift_examples/examples-sync.sh
+++ b/roles/openshift_examples/examples-sync.sh
@@ -6,9 +6,10 @@
# This script should be run from openshift-ansible/roles/openshift_examples
XPAAS_VERSION=ose-v1.1.0
-EXAMPLES_BASE=$(pwd)/files/examples
-find files/examples -name '*.json' -delete
-find files/examples -name '*.yaml' -delete
+ORIGIN_VERSION=v1.1
+EXAMPLES_BASE=$(pwd)/files/examples/${ORIGIN_VERSION}
+find ${EXAMPLES_BASE} -name '*.json' -delete
+find ${EXAMPLES_BASE} -name '*.yaml' -delete
TEMP=`mktemp -d`
pushd $TEMP
diff --git a/roles/openshift_examples/files/examples/README.md b/roles/openshift_examples/files/examples/README.md
new file mode 100644
index 000000000..a4ba3bcb8
--- /dev/null
+++ b/roles/openshift_examples/files/examples/README.md
@@ -0,0 +1,7 @@
+Image Streams and Templates may require specific versions of OpenShift, so
+they've been namespaced. At this time, once a new version of Origin is released,
+the older versions will only receive new content by specific request.
+
+Please file an issue at https://github.com/openshift/openshift-ansible if you'd
+like to see older content updated and have tested it to ensure it's backwards
+compatible.
diff --git a/roles/openshift_examples/files/examples/db-templates/mongodb-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.0/db-templates/mongodb-ephemeral-template.json
index 6b90fa54e..6b90fa54e 100644
--- a/roles/openshift_examples/files/examples/db-templates/mongodb-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v1.0/db-templates/mongodb-ephemeral-template.json
diff --git a/roles/openshift_examples/files/examples/db-templates/mongodb-persistent-template.json b/roles/openshift_examples/files/examples/v1.0/db-templates/mongodb-persistent-template.json
index 97b315600..97b315600 100644
--- a/roles/openshift_examples/files/examples/db-templates/mongodb-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.0/db-templates/mongodb-persistent-template.json
diff --git a/roles/openshift_examples/files/examples/db-templates/mysql-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.0/db-templates/mysql-ephemeral-template.json
index b384a5992..b384a5992 100644
--- a/roles/openshift_examples/files/examples/db-templates/mysql-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v1.0/db-templates/mysql-ephemeral-template.json
diff --git a/roles/openshift_examples/files/examples/db-templates/mysql-persistent-template.json b/roles/openshift_examples/files/examples/v1.0/db-templates/mysql-persistent-template.json
index 6e19f48f5..6e19f48f5 100644
--- a/roles/openshift_examples/files/examples/db-templates/mysql-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.0/db-templates/mysql-persistent-template.json
diff --git a/roles/openshift_examples/files/examples/db-templates/postgresql-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.0/db-templates/postgresql-ephemeral-template.json
index 60d6b8519..60d6b8519 100644
--- a/roles/openshift_examples/files/examples/db-templates/postgresql-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v1.0/db-templates/postgresql-ephemeral-template.json
diff --git a/roles/openshift_examples/files/examples/db-templates/postgresql-persistent-template.json b/roles/openshift_examples/files/examples/v1.0/db-templates/postgresql-persistent-template.json
index 91cd7453e..91cd7453e 100644
--- a/roles/openshift_examples/files/examples/db-templates/postgresql-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.0/db-templates/postgresql-persistent-template.json
diff --git a/roles/openshift_examples/files/examples/v1.0/image-streams/image-streams-centos7-v1-0.json b/roles/openshift_examples/files/examples/v1.0/image-streams/image-streams-centos7-v1-0.json
new file mode 100644
index 000000000..268d680f4
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.0/image-streams/image-streams-centos7-v1-0.json
@@ -0,0 +1,285 @@
+{
+ "kind": "ImageStreamList",
+ "apiVersion": "v1",
+ "metadata": {},
+ "items": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "ruby",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "dockerImageRepository": "openshift/ruby-20-centos7",
+ "tags": [
+ {
+ "name": "latest"
+ },
+ {
+ "name": "2.0",
+ "annotations": {
+ "description": "Build and run Ruby 2.0 applications",
+ "iconClass": "icon-ruby",
+ "tags": "builder,ruby",
+ "supports": "ruby:2.0,ruby",
+ "version": "2.0",
+ "sampleRepo": "https://github.com/openshift/ruby-ex.git"
+ },
+ "from": {
+ "Kind": "ImageStreamTag",
+ "Name": "latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "nodejs",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "dockerImageRepository": "openshift/nodejs-010-centos7",
+ "tags": [
+ {
+ "name": "latest"
+ },
+ {
+ "name": "0.10",
+ "annotations": {
+ "description": "Build and run NodeJS 0.10 applications",
+ "iconClass": "icon-nodejs",
+ "tags": "builder,nodejs",
+ "supports":"nodejs:0.10,nodejs:0.1,nodejs",
+ "version": "0.10",
+ "sampleRepo": "https://github.com/openshift/nodejs-ex.git"
+ },
+ "from": {
+ "Kind": "ImageStreamTag",
+ "Name": "latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "perl",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "dockerImageRepository": "openshift/perl-516-centos7",
+ "tags": [
+ {
+ "name": "latest"
+ },
+ {
+ "name": "5.16",
+ "annotations": {
+ "description": "Build and run Perl 5.16 applications",
+ "iconClass": "icon-perl",
+ "tags": "builder,perl",
+ "supports":"perl:5.16,perl",
+ "version": "5.16",
+ "sampleRepo": "https://github.com/openshift/dancer-ex.git"
+ },
+ "from": {
+ "Kind": "ImageStreamTag",
+ "Name": "latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "php",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "dockerImageRepository": "openshift/php-55-centos7",
+ "tags": [
+ {
+ "name": "latest"
+ },
+ {
+ "name": "5.5",
+ "annotations": {
+ "description": "Build and run PHP 5.5 applications",
+ "iconClass": "icon-php",
+ "tags": "builder,php",
+ "supports":"php:5.5,php",
+ "version": "5.5",
+ "sampleRepo": "https://github.com/openshift/cakephp-ex.git"
+ },
+ "from": {
+ "Kind": "ImageStreamTag",
+ "Name": "latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "python",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "dockerImageRepository": "openshift/python-33-centos7",
+ "tags": [
+ {
+ "name": "latest"
+ },
+ {
+ "name": "3.3",
+ "annotations": {
+ "description": "Build and run Python 3.3 applications",
+ "iconClass": "icon-python",
+ "tags": "builder,python",
+ "supports":"python:3.3,python",
+ "version": "3.3",
+ "sampleRepo": "https://github.com/openshift/django-ex.git"
+ },
+ "from": {
+ "Kind": "ImageStreamTag",
+ "Name": "latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "wildfly",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "dockerImageRepository": "openshift/wildfly-81-centos7",
+ "tags": [
+ {
+ "name": "latest"
+ },
+ {
+ "name": "8.1",
+ "annotations": {
+ "description": "Build and run Java applications on Wildfly 8.1",
+ "iconClass": "icon-wildfly",
+ "tags": "builder,wildfly,java",
+ "supports":"wildfly:8.1,jee,java",
+ "version": "8.1",
+ "sampleRepo": "https://github.com/bparees/openshift-jee-sample.git"
+ },
+ "from": {
+ "Kind": "ImageStreamTag",
+ "Name": "latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "mysql",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "dockerImageRepository": "openshift/mysql-55-centos7",
+ "tags": [
+ {
+ "name": "latest"
+ },
+ {
+ "name": "5.5",
+ "from": {
+ "Kind": "ImageStreamTag",
+ "Name": "latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "postgresql",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "dockerImageRepository": "openshift/postgresql-92-centos7",
+ "tags": [
+ {
+ "name": "latest"
+ },
+ {
+ "name": "9.2",
+ "from": {
+ "Kind": "ImageStreamTag",
+ "Name": "latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "mongodb",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "dockerImageRepository": "openshift/mongodb-24-centos7",
+ "tags": [
+ {
+ "name": "latest"
+ },
+ {
+ "name": "2.4",
+ "from": {
+ "Kind": "ImageStreamTag",
+ "Name": "latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jenkins",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "dockerImageRepository": "openshift/jenkins-1-centos7",
+ "tags": [
+ {
+ "name": "latest"
+ },
+ {
+ "name": "1",
+ "from": {
+ "Kind": "ImageStreamTag",
+ "Name": "latest"
+ }
+ }
+ ]
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/image-streams/image-streams-centos7.json b/roles/openshift_examples/files/examples/v1.0/image-streams/image-streams-centos7.json
index 1a78b1279..1a78b1279 100644
--- a/roles/openshift_examples/files/examples/image-streams/image-streams-centos7.json
+++ b/roles/openshift_examples/files/examples/v1.0/image-streams/image-streams-centos7.json
diff --git a/roles/openshift_examples/files/examples/v1.0/image-streams/image-streams-rhel7-v1-0.json b/roles/openshift_examples/files/examples/v1.0/image-streams/image-streams-rhel7-v1-0.json
new file mode 100644
index 000000000..aa62ebd53
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.0/image-streams/image-streams-rhel7-v1-0.json
@@ -0,0 +1,254 @@
+{
+ "kind": "ImageStreamList",
+ "apiVersion": "v1",
+ "metadata": {},
+ "items": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "ruby",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/openshift3/ruby-20-rhel7",
+ "tags": [
+ {
+ "name": "latest"
+ },
+ {
+ "name": "2.0",
+ "annotations": {
+ "description": "Build and run Ruby 2.0 applications",
+ "iconClass": "icon-ruby",
+ "tags": "builder,ruby",
+ "supports": "ruby:2.0,ruby",
+ "version": "2.0",
+ "sampleRepo": "https://github.com/openshift/ruby-ex.git"
+ },
+ "from": {
+ "Kind": "ImageStreamTag",
+ "Name": "latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "nodejs",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/openshift3/nodejs-010-rhel7",
+ "tags": [
+ {
+ "name": "latest"
+ },
+ {
+ "name": "0.10",
+ "annotations": {
+ "description": "Build and run NodeJS 0.10 applications",
+ "iconClass": "icon-nodejs",
+ "tags": "builder,nodejs",
+ "supports":"nodejs:0.10,nodejs:0.1,nodejs",
+ "version": "0.10",
+ "sampleRepo": "https://github.com/openshift/nodejs-ex.git"
+ },
+ "from": {
+ "Kind": "ImageStreamTag",
+ "Name": "latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "perl",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/openshift3/perl-516-rhel7",
+ "tags": [
+ {
+ "name": "latest"
+ },
+ {
+ "name": "5.16",
+ "annotations": {
+ "description": "Build and run Perl 5.16 applications",
+ "iconClass": "icon-perl",
+ "tags": "builder,perl",
+ "supports":"perl:5.16,perl",
+ "version": "5.16",
+ "sampleRepo": "https://github.com/openshift/dancer-ex.git"
+ },
+ "from": {
+ "Kind": "ImageStreamTag",
+ "Name": "latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "php",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/openshift3/php-55-rhel7",
+ "tags": [
+ {
+ "name": "latest"
+ },
+ {
+ "name": "5.5",
+ "annotations": {
+ "description": "Build and run PHP 5.5 applications",
+ "iconClass": "icon-php",
+ "tags": "builder,php",
+ "supports":"php:5.5,php",
+ "version": "5.5",
+ "sampleRepo": "https://github.com/openshift/cakephp-ex.git"
+ },
+ "from": {
+ "Kind": "ImageStreamTag",
+ "Name": "latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "python",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/openshift3/python-33-rhel7",
+ "tags": [
+ {
+ "name": "latest"
+ },
+ {
+ "name": "3.3",
+ "annotations": {
+ "description": "Build and run Python 3.3 applications",
+ "iconClass": "icon-python",
+ "tags": "builder,python",
+ "supports":"python:3.3,python",
+ "version": "3.3",
+ "sampleRepo": "https://github.com/openshift/django-ex.git"
+ },
+ "from": {
+ "Kind": "ImageStreamTag",
+ "Name": "latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "mysql",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/openshift3/mysql-55-rhel7",
+ "tags": [
+ {
+ "name": "latest"
+ },
+ {
+ "name": "5.5",
+ "from": {
+ "Kind": "ImageStreamTag",
+ "Name": "latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "postgresql",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/openshift3/postgresql-92-rhel7",
+ "tags": [
+ {
+ "name": "latest"
+ },
+ {
+ "name": "9.2",
+ "from": {
+ "Kind": "ImageStreamTag",
+ "Name": "latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "mongodb",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/openshift3/mongodb-24-rhel7",
+ "tags": [
+ {
+ "name": "latest"
+ },
+ {
+ "name": "2.4",
+ "from": {
+ "Kind": "ImageStreamTag",
+ "Name": "latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jenkins",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/openshift3/jenkins-1-rhel7",
+ "tags": [
+ {
+ "name": "latest"
+ },
+ {
+ "name": "1",
+ "from": {
+ "Kind": "ImageStreamTag",
+ "Name": "latest"
+ }
+ }
+ ]
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/image-streams/image-streams-rhel7.json b/roles/openshift_examples/files/examples/v1.0/image-streams/image-streams-rhel7.json
index d2a8cfb1d..d2a8cfb1d 100644
--- a/roles/openshift_examples/files/examples/image-streams/image-streams-rhel7.json
+++ b/roles/openshift_examples/files/examples/v1.0/image-streams/image-streams-rhel7.json
diff --git a/roles/openshift_examples/files/examples/infrastructure-templates/enterprise/logging-deployer.yaml b/roles/openshift_examples/files/examples/v1.0/infrastructure-templates/enterprise/logging-deployer.yaml
index b3b60bf9b..b3b60bf9b 100644
--- a/roles/openshift_examples/files/examples/infrastructure-templates/enterprise/logging-deployer.yaml
+++ b/roles/openshift_examples/files/examples/v1.0/infrastructure-templates/enterprise/logging-deployer.yaml
diff --git a/roles/openshift_examples/files/examples/v1.0/infrastructure-templates/enterprise/metrics-deployer.yaml b/roles/openshift_examples/files/examples/v1.0/infrastructure-templates/enterprise/metrics-deployer.yaml
new file mode 100644
index 000000000..ddd9f2f75
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.0/infrastructure-templates/enterprise/metrics-deployer.yaml
@@ -0,0 +1,116 @@
+#!/bin/bash
+#
+# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+apiVersion: "v1"
+kind: "Template"
+metadata:
+ name: metrics-deployer-template
+ annotations:
+ description: "Template for deploying the required Metrics integration. Requires cluster-admin 'metrics-deployer' service account and 'metrics-deployer' secret."
+ tags: "infrastructure"
+labels:
+ metrics-infra: deployer
+ provider: openshift
+ component: deployer
+objects:
+-
+ apiVersion: v1
+ kind: Pod
+ metadata:
+ generateName: metrics-deployer-
+ spec:
+ containers:
+ - image: ${IMAGE_PREFIX}metrics-deployer:${IMAGE_VERSION}
+ name: deployer
+ volumeMounts:
+ - name: secret
+ mountPath: /secret
+ readOnly: true
+ - name: empty
+ mountPath: /etc/deploy
+ env:
+ - name: PROJECT
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: IMAGE_PREFIX
+ value: ${IMAGE_PREFIX}
+ - name: IMAGE_VERSION
+ value: ${IMAGE_VERSION}
+ - name: PUBLIC_MASTER_URL
+ value: ${PUBLIC_MASTER_URL}
+ - name: MASTER_URL
+ value: ${MASTER_URL}
+ - name: REDEPLOY
+ value: ${REDEPLOY}
+ - name: USE_PERSISTENT_STORAGE
+ value: ${USE_PERSISTENT_STORAGE}
+ - name: HAWKULAR_METRICS_HOSTNAME
+ value: ${HAWKULAR_METRICS_HOSTNAME}
+ - name: CASSANDRA_NODES
+ value: ${CASSANDRA_NODES}
+ - name: CASSANDRA_PV_SIZE
+ value: ${CASSANDRA_PV_SIZE}
+ - name: METRIC_DURATION
+ value: ${METRIC_DURATION}
+ dnsPolicy: ClusterFirst
+ restartPolicy: Never
+ serviceAccount: metrics-deployer
+ volumes:
+ - name: empty
+ emptyDir: {}
+ - name: secret
+ secret:
+ secretName: metrics-deployer
+parameters:
+-
+ description: 'Specify prefix for metrics components; e.g. for "openshift/origin-metrics-deployer:v1.1", set prefix "openshift/origin-"'
+ name: IMAGE_PREFIX
+ value: "registry.access.redhat.com/openshift3/"
+-
+ description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:v1.1", set version "v1.1"'
+ name: IMAGE_VERSION
+ value: "3.1.0"
+-
+ description: "Internal URL for the master, for authentication retrieval"
+ name: MASTER_URL
+ value: "https://kubernetes.default.svc:443"
+-
+ description: "External hostname where clients will reach Hawkular Metrics"
+ name: HAWKULAR_METRICS_HOSTNAME
+ required: true
+-
+ description: "If set to true the deployer will try and delete all the existing components before trying to redeploy."
+ name: REDEPLOY
+ value: "false"
+-
+ description: "Set to true for persistent storage, set to false to use non persistent storage"
+ name: USE_PERSISTENT_STORAGE
+ value: "true"
+-
+ description: "The number of Cassandra Nodes to deploy for the initial cluster"
+ name: CASSANDRA_NODES
+ value: "1"
+-
+ description: "The persistent volume size for each of the Cassandra nodes"
+ name: CASSANDRA_PV_SIZE
+ value: "1Gi"
+-
+ description: "How many days metrics should be stored for."
+ name: METRIC_DURATION
+ value: "7"
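For reference, a deployer template like the metrics-deployer.yaml added above is instantiated with the oc client once the prerequisite 'metrics-deployer' service account and secret noted in its description exist. A minimal sketch, using a placeholder hostname and project; the parameter flag is -v or -p depending on the oc release:

    oc project openshift-infra
    oc process -f metrics-deployer.yaml \
        -v HAWKULAR_METRICS_HOSTNAME=hawkular-metrics.example.com | oc create -f -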
diff --git a/roles/openshift_examples/files/examples/infrastructure-templates/origin/logging-deployer.yaml b/roles/openshift_examples/files/examples/v1.0/infrastructure-templates/origin/logging-deployer.yaml
index 4c798e148..4c798e148 100644
--- a/roles/openshift_examples/files/examples/infrastructure-templates/origin/logging-deployer.yaml
+++ b/roles/openshift_examples/files/examples/v1.0/infrastructure-templates/origin/logging-deployer.yaml
diff --git a/roles/openshift_examples/files/examples/infrastructure-templates/enterprise/metrics-deployer.yaml b/roles/openshift_examples/files/examples/v1.0/infrastructure-templates/origin/metrics-deployer.yaml
index d823b2587..3e9bcde5b 100644
--- a/roles/openshift_examples/files/examples/infrastructure-templates/enterprise/metrics-deployer.yaml
+++ b/roles/openshift_examples/files/examples/v1.0/infrastructure-templates/origin/metrics-deployer.yaml
@@ -81,11 +81,11 @@ parameters:
-
description: 'Specify prefix for metrics components; e.g. for "openshift/origin-metrics-deployer:v1.1", set prefix "openshift/origin-"'
name: IMAGE_PREFIX
- value: "hawkular/"
+ value: "docker.io/openshift/origin-"
-
description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:v1.1", set version "v1.1"'
name: IMAGE_VERSION
- value: "0.7.0-SNAPSHOT"
+ value: "latest"
-
description: "Internal URL for the master, for authentication retrieval"
name: MASTER_URL
diff --git a/roles/openshift_examples/files/examples/quickstart-templates/cakephp-mysql.json b/roles/openshift_examples/files/examples/v1.0/quickstart-templates/cakephp-mysql.json
index da5679444..da5679444 100644
--- a/roles/openshift_examples/files/examples/quickstart-templates/cakephp-mysql.json
+++ b/roles/openshift_examples/files/examples/v1.0/quickstart-templates/cakephp-mysql.json
diff --git a/roles/openshift_examples/files/examples/quickstart-templates/cakephp.json b/roles/openshift_examples/files/examples/v1.0/quickstart-templates/cakephp.json
index f426e1dd6..f426e1dd6 100644
--- a/roles/openshift_examples/files/examples/quickstart-templates/cakephp.json
+++ b/roles/openshift_examples/files/examples/v1.0/quickstart-templates/cakephp.json
diff --git a/roles/openshift_examples/files/examples/quickstart-templates/dancer-mysql.json b/roles/openshift_examples/files/examples/v1.0/quickstart-templates/dancer-mysql.json
index 55f655102..55f655102 100644
--- a/roles/openshift_examples/files/examples/quickstart-templates/dancer-mysql.json
+++ b/roles/openshift_examples/files/examples/v1.0/quickstart-templates/dancer-mysql.json
diff --git a/roles/openshift_examples/files/examples/quickstart-templates/dancer.json b/roles/openshift_examples/files/examples/v1.0/quickstart-templates/dancer.json
index 3ee19be83..3ee19be83 100644
--- a/roles/openshift_examples/files/examples/quickstart-templates/dancer.json
+++ b/roles/openshift_examples/files/examples/v1.0/quickstart-templates/dancer.json
diff --git a/roles/openshift_examples/files/examples/quickstart-templates/django-postgresql.json b/roles/openshift_examples/files/examples/v1.0/quickstart-templates/django-postgresql.json
index 749064e98..749064e98 100644
--- a/roles/openshift_examples/files/examples/quickstart-templates/django-postgresql.json
+++ b/roles/openshift_examples/files/examples/v1.0/quickstart-templates/django-postgresql.json
diff --git a/roles/openshift_examples/files/examples/quickstart-templates/django.json b/roles/openshift_examples/files/examples/v1.0/quickstart-templates/django.json
index 143a942ab..143a942ab 100644
--- a/roles/openshift_examples/files/examples/quickstart-templates/django.json
+++ b/roles/openshift_examples/files/examples/v1.0/quickstart-templates/django.json
diff --git a/roles/openshift_examples/files/examples/quickstart-templates/jenkins-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.0/quickstart-templates/jenkins-ephemeral-template.json
index 14bd032af..14bd032af 100644
--- a/roles/openshift_examples/files/examples/quickstart-templates/jenkins-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v1.0/quickstart-templates/jenkins-ephemeral-template.json
diff --git a/roles/openshift_examples/files/examples/quickstart-templates/jenkins-persistent-template.json b/roles/openshift_examples/files/examples/v1.0/quickstart-templates/jenkins-persistent-template.json
index fa31de486..fa31de486 100644
--- a/roles/openshift_examples/files/examples/quickstart-templates/jenkins-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v1.0/quickstart-templates/jenkins-persistent-template.json
diff --git a/roles/openshift_examples/files/examples/quickstart-templates/nodejs-mongodb.json b/roles/openshift_examples/files/examples/v1.0/quickstart-templates/nodejs-mongodb.json
index 8760b074c..8760b074c 100644
--- a/roles/openshift_examples/files/examples/quickstart-templates/nodejs-mongodb.json
+++ b/roles/openshift_examples/files/examples/v1.0/quickstart-templates/nodejs-mongodb.json
diff --git a/roles/openshift_examples/files/examples/quickstart-templates/nodejs.json b/roles/openshift_examples/files/examples/v1.0/quickstart-templates/nodejs.json
index e047266e3..e047266e3 100644
--- a/roles/openshift_examples/files/examples/quickstart-templates/nodejs.json
+++ b/roles/openshift_examples/files/examples/v1.0/quickstart-templates/nodejs.json
diff --git a/roles/openshift_examples/files/examples/quickstart-templates/rails-postgresql.json b/roles/openshift_examples/files/examples/v1.0/quickstart-templates/rails-postgresql.json
index b98282528..b98282528 100644
--- a/roles/openshift_examples/files/examples/quickstart-templates/rails-postgresql.json
+++ b/roles/openshift_examples/files/examples/v1.0/quickstart-templates/rails-postgresql.json
diff --git a/roles/openshift_examples/files/examples/xpaas-streams/jboss-image-streams.json b/roles/openshift_examples/files/examples/v1.0/xpaas-streams/jboss-image-streams.json
index aaf5569ae..aaf5569ae 100644
--- a/roles/openshift_examples/files/examples/xpaas-streams/jboss-image-streams.json
+++ b/roles/openshift_examples/files/examples/v1.0/xpaas-streams/jboss-image-streams.json
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/amq62-basic.json b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/amq62-basic.json
index 3fd04c28c..3fd04c28c 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/amq62-basic.json
+++ b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/amq62-basic.json
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/amq62-persistent-ssl.json b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/amq62-persistent-ssl.json
index aa9e716cf..aa9e716cf 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/amq62-persistent-ssl.json
+++ b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/amq62-persistent-ssl.json
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/amq62-persistent.json b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/amq62-persistent.json
index 3a2db3ce9..3a2db3ce9 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/amq62-persistent.json
+++ b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/amq62-persistent.json
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/amq62-ssl.json b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/amq62-ssl.json
index f61fb24c2..f61fb24c2 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/amq62-ssl.json
+++ b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/amq62-ssl.json
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap64-amq-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/eap64-amq-persistent-s2i.json
index 2fc3b5b25..2fc3b5b25 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/eap64-amq-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/eap64-amq-persistent-s2i.json
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap64-amq-s2i.json b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/eap64-amq-s2i.json
index a420bb1ea..a420bb1ea 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/eap64-amq-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/eap64-amq-s2i.json
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap64-basic-s2i.json b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/eap64-basic-s2i.json
index 3f90eb8be..3f90eb8be 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/eap64-basic-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/eap64-basic-s2i.json
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap64-https-s2i.json b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/eap64-https-s2i.json
index 220d2f5b9..220d2f5b9 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/eap64-https-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/eap64-https-s2i.json
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap64-mongodb-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/eap64-mongodb-persistent-s2i.json
index a1a3a9f2c..a1a3a9f2c 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/eap64-mongodb-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/eap64-mongodb-persistent-s2i.json
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap64-mongodb-s2i.json b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/eap64-mongodb-s2i.json
index dfd1443ed..dfd1443ed 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/eap64-mongodb-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/eap64-mongodb-s2i.json
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap64-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/eap64-mysql-persistent-s2i.json
index fdd368a5f..fdd368a5f 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/eap64-mysql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/eap64-mysql-persistent-s2i.json
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap64-mysql-s2i.json b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/eap64-mysql-s2i.json
index ff6bdc112..ff6bdc112 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/eap64-mysql-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/eap64-mysql-s2i.json
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap64-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/eap64-postgresql-persistent-s2i.json
index 6443afdb0..6443afdb0 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/eap64-postgresql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/eap64-postgresql-persistent-s2i.json
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/eap64-postgresql-s2i.json b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/eap64-postgresql-s2i.json
index e879e51cf..e879e51cf 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/eap64-postgresql-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/eap64-postgresql-s2i.json
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-basic-s2i.json b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/jws30-tomcat7-basic-s2i.json
index 729079130..729079130 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-basic-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/jws30-tomcat7-basic-s2i.json
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-https-s2i.json b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/jws30-tomcat7-https-s2i.json
index 7ce7e7fe2..7ce7e7fe2 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-https-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/jws30-tomcat7-https-s2i.json
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json
index 9a08ec0b0..9a08ec0b0 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-mongodb-s2i.json b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/jws30-tomcat7-mongodb-s2i.json
index b8dfb3ad3..b8dfb3ad3 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-mongodb-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/jws30-tomcat7-mongodb-s2i.json
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json
index d36e330d3..d36e330d3 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-mysql-s2i.json b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/jws30-tomcat7-mysql-s2i.json
index f5309db60..f5309db60 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-mysql-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/jws30-tomcat7-mysql-s2i.json
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json
index ee88a4c69..ee88a4c69 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-postgresql-s2i.json b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/jws30-tomcat7-postgresql-s2i.json
index f5940a7a1..f5940a7a1 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat7-postgresql-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/jws30-tomcat7-postgresql-s2i.json
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-basic-s2i.json b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/jws30-tomcat8-basic-s2i.json
index b24ce40ae..b24ce40ae 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-basic-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/jws30-tomcat8-basic-s2i.json
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-https-s2i.json b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/jws30-tomcat8-https-s2i.json
index 7e788d0db..7e788d0db 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-https-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/jws30-tomcat8-https-s2i.json
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json
index 2f1d69c75..2f1d69c75 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-mongodb-s2i.json b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/jws30-tomcat8-mongodb-s2i.json
index bad676f2e..bad676f2e 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-mongodb-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/jws30-tomcat8-mongodb-s2i.json
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json
index e20a45982..e20a45982 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-mysql-s2i.json b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/jws30-tomcat8-mysql-s2i.json
index 1b9624756..1b9624756 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-mysql-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/jws30-tomcat8-mysql-s2i.json
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json
index dc492a38e..dc492a38e 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json
diff --git a/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-postgresql-s2i.json b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/jws30-tomcat8-postgresql-s2i.json
index 242b37a79..242b37a79 100644
--- a/roles/openshift_examples/files/examples/xpaas-templates/jws30-tomcat8-postgresql-s2i.json
+++ b/roles/openshift_examples/files/examples/v1.0/xpaas-templates/jws30-tomcat8-postgresql-s2i.json
diff --git a/roles/openshift_examples/files/examples/v1.1/db-templates/mongodb-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.1/db-templates/mongodb-ephemeral-template.json
new file mode 100644
index 000000000..6b90fa54e
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.1/db-templates/mongodb-ephemeral-template.json
@@ -0,0 +1,184 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "mongodb-ephemeral",
+ "creationTimestamp": null,
+ "annotations": {
+ "description": "MongoDB database service, without persistent storage. WARNING: Any data stored will be lost upon pod destruction. Only use this template for testing",
+ "iconClass": "icon-mongodb",
+ "tags": "database,mongodb"
+ }
+ },
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "mongo",
+ "protocol": "TCP",
+ "port": 27017,
+ "targetPort": 27017,
+ "nodePort": 0
+ }
+ ],
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "portalIP": "",
+ "type": "ClusterIP",
+ "sessionAffinity": "None"
+ },
+ "status": {
+ "loadBalancer": {}
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate",
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "mongodb"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "mongodb:latest",
+ "namespace": "openshift"
+ },
+ "lastTriggeredImage": ""
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "mongodb",
+ "image": "mongodb",
+ "ports": [
+ {
+ "containerPort": 27017,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MONGODB_USER",
+ "value": "${MONGODB_USER}"
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "value": "${MONGODB_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "value": "${MONGODB_DATABASE}"
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "value": "${MONGODB_ADMIN_PASSWORD}"
+ }
+ ],
+ "resources": {},
+ "volumeMounts": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "mountPath": "/var/lib/mongodb/data"
+ }
+ ],
+ "terminationMessagePath": "/dev/termination-log",
+ "imagePullPolicy": "IfNotPresent",
+ "capabilities": {},
+ "securityContext": {
+ "capabilities": {},
+ "privileged": false
+ }
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "emptyDir": {
+ "medium": ""
+ }
+ }
+ ],
+ "restartPolicy": "Always",
+ "dnsPolicy": "ClusterFirst"
+ }
+ }
+ },
+ "status": {}
+ }
+ ],
+ "parameters": [
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "description": "Database service name",
+ "value": "mongodb",
+ "required": true
+ },
+ {
+ "name": "MONGODB_USER",
+ "description": "Username for MongoDB user that will be used for accessing the database",
+ "generate": "expression",
+ "from": "user[A-Z0-9]{3}",
+ "required": true
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "description": "Password for the MongoDB user",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}",
+ "required": true
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "description": "Database name",
+ "value": "sampledb",
+ "required": true
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "description": "Password for the database admin user",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}",
+ "required": true
+ }
+ ],
+ "labels": {
+ "template": "mongodb-ephemeral-template"
+ }
+}
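As a usage illustration (commands not taken from this repository), the ephemeral MongoDB template above can be processed straight from the file; every parameter either has a default or is generated, so no overrides are needed:

    oc process -f mongodb-ephemeral-template.json | oc create -f -
    oc get dc,svc -l template=mongodb-ephemeral-template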
diff --git a/roles/openshift_examples/files/examples/v1.1/db-templates/mongodb-persistent-template.json b/roles/openshift_examples/files/examples/v1.1/db-templates/mongodb-persistent-template.json
new file mode 100644
index 000000000..97b315600
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.1/db-templates/mongodb-persistent-template.json
@@ -0,0 +1,207 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "mongodb-persistent",
+ "creationTimestamp": null,
+ "annotations": {
+ "description": "MongoDB database service, with persistent storage. Scaling to more than one replica is not supported",
+ "iconClass": "icon-mongodb",
+ "tags": "database,mongodb"
+ }
+ },
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "mongo",
+ "protocol": "TCP",
+ "port": 27017,
+ "targetPort": 27017,
+ "nodePort": 0
+ }
+ ],
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "portalIP": "",
+ "type": "ClusterIP",
+ "sessionAffinity": "None"
+ },
+ "status": {
+ "loadBalancer": {}
+ }
+ },
+ {
+ "kind": "PersistentVolumeClaim",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate",
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "mongodb"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "mongodb:latest",
+ "namespace": "openshift"
+ },
+ "lastTriggeredImage": ""
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "mongodb",
+ "image": "mongodb",
+ "ports": [
+ {
+ "containerPort": 27017,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MONGODB_USER",
+ "value": "${MONGODB_USER}"
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "value": "${MONGODB_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "value": "${MONGODB_DATABASE}"
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "value": "${MONGODB_ADMIN_PASSWORD}"
+ }
+ ],
+ "resources": {},
+ "volumeMounts": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "mountPath": "/var/lib/mongodb/data"
+ }
+ ],
+ "terminationMessagePath": "/dev/termination-log",
+ "imagePullPolicy": "IfNotPresent",
+ "capabilities": {},
+ "securityContext": {
+ "capabilities": {},
+ "privileged": false
+ }
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "persistentVolumeClaim": {
+ "claimName": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ ],
+ "restartPolicy": "Always",
+ "dnsPolicy": "ClusterFirst"
+ }
+ }
+ },
+ "status": {}
+ }
+ ],
+ "parameters": [
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "description": "Database service name",
+ "value": "mongodb",
+ "required": true
+ },
+ {
+ "name": "MONGODB_USER",
+ "description": "Username for MongoDB user that will be used for accessing the database",
+ "generate": "expression",
+ "from": "user[A-Z0-9]{3}",
+ "required": true
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "description": "Password for the MongoDB user",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}",
+ "required": true
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "description": "Database name",
+ "value": "sampledb",
+ "required": true
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "description": "Password for the database admin user",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}",
+ "required": true
+ },
+ {
+ "name": "VOLUME_CAPACITY",
+ "description": "Volume space available for data, e.g. 512Mi, 2Gi",
+ "value": "512Mi",
+ "required": true
+ }
+ ],
+ "labels": {
+ "template": "mongodb-persistent-template"
+ }
+}
diff --git a/roles/openshift_examples/files/examples/v1.1/db-templates/mysql-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.1/db-templates/mysql-ephemeral-template.json
new file mode 100644
index 000000000..b384a5992
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.1/db-templates/mysql-ephemeral-template.json
@@ -0,0 +1,173 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "mysql-ephemeral",
+ "creationTimestamp": null,
+ "annotations": {
+ "description": "MySQL database service, without persistent storage. WARNING: Any data stored will be lost upon pod destruction. Only use this template for testing",
+ "iconClass": "icon-mysql-database",
+ "tags": "database,mysql"
+ }
+ },
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "mysql",
+ "protocol": "TCP",
+ "port": 3306,
+ "targetPort": 3306,
+ "nodePort": 0
+ }
+ ],
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "portalIP": "",
+ "type": "ClusterIP",
+ "sessionAffinity": "None"
+ },
+ "status": {
+ "loadBalancer": {}
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate",
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "mysql:latest",
+ "namespace": "openshift"
+ },
+ "lastTriggeredImage": ""
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "mysql",
+ "image": "mysql",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${MYSQL_USER}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${MYSQL_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${MYSQL_DATABASE}"
+ }
+ ],
+ "resources": {},
+ "volumeMounts": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "mountPath": "/var/lib/mysql/data"
+ }
+ ],
+ "terminationMessagePath": "/dev/termination-log",
+ "imagePullPolicy": "IfNotPresent",
+ "capabilities": {},
+ "securityContext": {
+ "capabilities": {},
+ "privileged": false
+ }
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "emptyDir": {
+ "medium": ""
+ }
+ }
+ ],
+ "restartPolicy": "Always",
+ "dnsPolicy": "ClusterFirst"
+ }
+ }
+ },
+ "status": {}
+ }
+ ],
+ "parameters": [
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "description": "Database service name",
+ "value": "mysql",
+ "required": true
+ },
+ {
+ "name": "MYSQL_USER",
+ "description": "Username for MySQL user that will be used for accessing the database",
+ "generate": "expression",
+ "from": "user[A-Z0-9]{3}",
+ "required": true
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "description": "Password for the MySQL user",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}",
+ "required": true
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "description": "Database name",
+ "value": "sampledb",
+ "required": true
+ }
+ ],
+ "labels": {
+ "template": "mysql-ephemeral-template"
+ }
+}
diff --git a/roles/openshift_examples/files/examples/v1.1/db-templates/mysql-persistent-template.json b/roles/openshift_examples/files/examples/v1.1/db-templates/mysql-persistent-template.json
new file mode 100644
index 000000000..6e19f48f5
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.1/db-templates/mysql-persistent-template.json
@@ -0,0 +1,196 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "mysql-persistent",
+ "creationTimestamp": null,
+ "annotations": {
+ "description": "MySQL database service, with persistent storage. Scaling to more than one replica is not supported",
+ "iconClass": "icon-mysql-database",
+ "tags": "database,mysql"
+ }
+ },
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "mysql",
+ "protocol": "TCP",
+ "port": 3306,
+ "targetPort": 3306,
+ "nodePort": 0
+ }
+ ],
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "portalIP": "",
+ "type": "ClusterIP",
+ "sessionAffinity": "None"
+ },
+ "status": {
+ "loadBalancer": {}
+ }
+ },
+ {
+ "kind": "PersistentVolumeClaim",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate",
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "mysql:latest",
+ "namespace": "openshift"
+ },
+ "lastTriggeredImage": ""
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "mysql",
+ "image": "mysql",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${MYSQL_USER}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${MYSQL_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${MYSQL_DATABASE}"
+ }
+ ],
+ "resources": {},
+ "volumeMounts": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "mountPath": "/var/lib/mysql/data"
+ }
+ ],
+ "terminationMessagePath": "/dev/termination-log",
+ "imagePullPolicy": "IfNotPresent",
+ "capabilities": {},
+ "securityContext": {
+ "capabilities": {},
+ "privileged": false
+ }
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "persistentVolumeClaim": {
+ "claimName": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ ],
+ "restartPolicy": "Always",
+ "dnsPolicy": "ClusterFirst"
+ }
+ }
+ },
+ "status": {}
+ }
+ ],
+ "parameters": [
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "description": "Database service name",
+ "value": "mysql",
+ "required": true
+ },
+ {
+ "name": "MYSQL_USER",
+ "description": "Username for MySQL user that will be used for accessing the database",
+ "generate": "expression",
+ "from": "user[A-Z0-9]{3}",
+ "required": true
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "description": "Password for the MySQL user",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}",
+ "required": true
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "description": "Database name",
+ "value": "sampledb",
+ "required": true
+ },
+ {
+ "name": "VOLUME_CAPACITY",
+ "description": "Volume space available for data, e.g. 512Mi, 2Gi",
+ "value": "512Mi",
+ "required": true
+ }
+ ],
+ "labels": {
+ "template": "mysql-persistent-template"
+ }
+}
diff --git a/roles/openshift_examples/files/examples/v1.1/db-templates/postgresql-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.1/db-templates/postgresql-ephemeral-template.json
new file mode 100644
index 000000000..60d6b8519
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.1/db-templates/postgresql-ephemeral-template.json
@@ -0,0 +1,173 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "postgresql-ephemeral",
+ "creationTimestamp": null,
+ "annotations": {
+ "description": "PostgreSQL database service, without persistent storage. WARNING: Any data stored will be lost upon pod destruction. Only use this template for testing",
+ "iconClass": "icon-postgresql",
+ "tags": "database,postgresql"
+ }
+ },
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "postgresql",
+ "protocol": "TCP",
+ "port": 5432,
+ "targetPort": 5432,
+ "nodePort": 0
+ }
+ ],
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "portalIP": "",
+ "type": "ClusterIP",
+ "sessionAffinity": "None"
+ },
+ "status": {
+ "loadBalancer": {}
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate",
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "postgresql:latest",
+ "namespace": "openshift"
+ },
+ "lastTriggeredImage": ""
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "postgresql",
+ "image": "postgresql",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${POSTGRESQL_USER}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${POSTGRESQL_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${POSTGRESQL_DATABASE}"
+ }
+ ],
+ "resources": {},
+ "volumeMounts": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "mountPath": "/var/lib/pgsql/data"
+ }
+ ],
+ "terminationMessagePath": "/dev/termination-log",
+ "imagePullPolicy": "IfNotPresent",
+ "capabilities": {},
+ "securityContext": {
+ "capabilities": {},
+ "privileged": false
+ }
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "emptyDir": {
+ "medium": ""
+ }
+ }
+ ],
+ "restartPolicy": "Always",
+ "dnsPolicy": "ClusterFirst"
+ }
+ }
+ },
+ "status": {}
+ }
+ ],
+ "parameters": [
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "description": "Database service name",
+ "value": "postgresql",
+ "required": true
+ },
+ {
+ "name": "POSTGRESQL_USER",
+ "description": "Username for PostgreSQL user that will be used for accessing the database",
+ "generate": "expression",
+ "from": "user[A-Z0-9]{3}",
+ "required": true
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "description": "Password for the PostgreSQL user",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}",
+ "required": true
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "description": "Database name",
+ "value": "sampledb",
+ "required": true
+ }
+ ],
+ "labels": {
+ "template": "postgresql-ephemeral-template"
+ }
+}
diff --git a/roles/openshift_examples/files/examples/v1.1/db-templates/postgresql-persistent-template.json b/roles/openshift_examples/files/examples/v1.1/db-templates/postgresql-persistent-template.json
new file mode 100644
index 000000000..91cd7453e
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.1/db-templates/postgresql-persistent-template.json
@@ -0,0 +1,196 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "postgresql-persistent",
+ "creationTimestamp": null,
+ "annotations": {
+ "description": "PostgreSQL database service, with persistent storage. Scaling to more than one replica is not supported",
+ "iconClass": "icon-postgresql",
+ "tags": "database,postgresql"
+ }
+ },
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "postgresql",
+ "protocol": "TCP",
+ "port": 5432,
+ "targetPort": 5432,
+ "nodePort": 0
+ }
+ ],
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "portalIP": "",
+ "type": "ClusterIP",
+ "sessionAffinity": "None"
+ },
+ "status": {
+ "loadBalancer": {}
+ }
+ },
+ {
+ "kind": "PersistentVolumeClaim",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate",
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "postgresql:latest",
+ "namespace": "openshift"
+ },
+ "lastTriggeredImage": ""
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "postgresql",
+ "image": "postgresql",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${POSTGRESQL_USER}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${POSTGRESQL_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${POSTGRESQL_DATABASE}"
+ }
+ ],
+ "resources": {},
+ "volumeMounts": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "mountPath": "/var/lib/pgsql/data"
+ }
+ ],
+ "terminationMessagePath": "/dev/termination-log",
+ "imagePullPolicy": "IfNotPresent",
+ "capabilities": {},
+ "securityContext": {
+ "capabilities": {},
+ "privileged": false
+ }
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${DATABASE_SERVICE_NAME}-data",
+ "persistentVolumeClaim": {
+ "claimName": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ ],
+ "restartPolicy": "Always",
+ "dnsPolicy": "ClusterFirst"
+ }
+ }
+ },
+ "status": {}
+ }
+ ],
+ "parameters": [
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "description": "Database service name",
+ "value": "postgresql",
+ "required": true
+ },
+ {
+ "name": "POSTGRESQL_USER",
+ "description": "Username for PostgreSQL user that will be used for accessing the database",
+ "generate": "expression",
+ "from": "user[A-Z0-9]{3}",
+ "required": true
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "description": "Password for the PostgreSQL user",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}",
+ "required": true
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "description": "Database name",
+ "value": "sampledb",
+ "required": true
+ },
+ {
+ "name": "VOLUME_CAPACITY",
+ "description": "Volume space available for data, e.g. 512Mi, 2Gi",
+ "value": "512Mi",
+ "required": true
+ }
+ ],
+ "labels": {
+ "template": "postgresql-persistent-template"
+ }
+}
diff --git a/roles/openshift_examples/files/examples/v1.1/image-streams/image-streams-centos7.json b/roles/openshift_examples/files/examples/v1.1/image-streams/image-streams-centos7.json
new file mode 100644
index 000000000..1a78b1279
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.1/image-streams/image-streams-centos7.json
@@ -0,0 +1,412 @@
+{
+ "kind": "ImageStreamList",
+ "apiVersion": "v1",
+ "metadata": {},
+ "items": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "ruby",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "from": {
+ "Kind": "ImageStreamTag",
+ "Name": "2.0"
+ }
+ },
+ {
+ "name": "2.0",
+ "annotations": {
+ "description": "Build and run Ruby 2.0 applications",
+ "iconClass": "icon-ruby",
+ "tags": "builder,ruby",
+ "supports": "ruby:2.0,ruby",
+ "version": "2.0",
+ "sampleRepo": "https://github.com/openshift/ruby-ex.git"
+ },
+ "from": {
+ "Kind": "DockerImage",
+ "Name": "openshift/ruby-20-centos7:latest"
+ }
+ },
+ {
+ "name": "2.2",
+ "annotations": {
+ "description": "Build and run Ruby 2.2 applications",
+ "iconClass": "icon-ruby",
+ "tags": "builder,ruby",
+ "supports": "ruby:2.2,ruby",
+ "version": "2.2",
+ "sampleRepo": "https://github.com/openshift/ruby-ex.git"
+ },
+ "from": {
+ "Kind": "DockerImage",
+ "Name": "centos/ruby-22-centos7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "nodejs",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "from": {
+ "Kind": "ImageStreamTag",
+ "Name": "0.10"
+ }
+ },
+ {
+ "name": "0.10",
+ "annotations": {
+ "description": "Build and run NodeJS 0.10 applications",
+ "iconClass": "icon-nodejs",
+ "tags": "builder,nodejs",
+ "supports":"nodejs:0.10,nodejs:0.1,nodejs",
+ "version": "0.10",
+ "sampleRepo": "https://github.com/openshift/nodejs-ex.git"
+ },
+ "from": {
+ "Kind": "DockerImage",
+ "Name": "openshift/nodejs-010-centos7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "perl",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "from": {
+ "Kind": "ImageStreamTag",
+ "Name": "5.16"
+ }
+ },
+ {
+ "name": "5.16",
+ "annotations": {
+ "description": "Build and run Perl 5.16 applications",
+ "iconClass": "icon-perl",
+ "tags": "builder,perl",
+ "supports":"perl:5.16,perl",
+ "version": "5.16",
+ "sampleRepo": "https://github.com/openshift/dancer-ex.git"
+ },
+ "from": {
+ "Kind": "DockerImage",
+ "Name": "openshift/perl-516-centos7:latest"
+ }
+ },
+ {
+ "name": "5.20",
+ "annotations": {
+ "description": "Build and run Perl 5.20 applications",
+ "iconClass": "icon-perl",
+ "tags": "builder,perl",
+ "supports":"perl:5.20,perl",
+ "version": "5.20",
+ "sampleRepo": "https://github.com/openshift/dancer-ex.git"
+ },
+ "from": {
+ "Kind": "DockerImage",
+ "Name": "centos/perl-520-centos7:latest"
+ }
+
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "php",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "from": {
+ "Kind": "ImageStreamTag",
+ "Name": "5.5"
+ }
+ },
+ {
+ "name": "5.5",
+ "annotations": {
+ "description": "Build and run PHP 5.5 applications",
+ "iconClass": "icon-php",
+ "tags": "builder,php",
+ "supports":"php:5.5,php",
+ "version": "5.5",
+ "sampleRepo": "https://github.com/openshift/cakephp-ex.git"
+ },
+ "from": {
+ "Kind": "DockerImage",
+ "Name": "openshift/php-55-centos7:latest"
+ }
+ },
+ {
+ "name": "5.6",
+ "annotations": {
+ "description": "Build and run PHP 5.6 applications",
+ "iconClass": "icon-php",
+ "tags": "builder,php",
+ "supports":"php:5.6,php",
+ "version": "5.6",
+ "sampleRepo": "https://github.com/openshift/cakephp-ex.git"
+ },
+ "from": {
+ "Kind": "DockerImage",
+ "Name": "centos/php-56-centos7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "python",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "from": {
+ "Kind": "ImageStreamTag",
+ "Name": "3.3"
+ }
+ },
+ {
+ "name": "3.3",
+ "annotations": {
+ "description": "Build and run Python 3.3 applications",
+ "iconClass": "icon-python",
+ "tags": "builder,python",
+ "supports":"python:3.3,python",
+ "version": "3.3",
+ "sampleRepo": "https://github.com/openshift/django-ex.git"
+ },
+ "from": {
+ "Kind": "DockerImage",
+ "Name": "openshift/python-33-centos7:latest"
+ }
+ },
+ {
+ "name": "2.7",
+ "annotations": {
+ "description": "Build and run Python 2.7 applications",
+ "iconClass": "icon-python",
+ "tags": "builder,python",
+ "supports":"python:2.7,python",
+ "version": "2.7",
+ "sampleRepo": "https://github.com/openshift/django-ex.git"
+ },
+ "from": {
+ "Kind": "DockerImage",
+ "Name": "centos/python-27-centos7:latest"
+ }
+ },
+ {
+ "name": "3.4",
+ "annotations": {
+ "description": "Build and run Python 3.4 applications",
+ "iconClass": "icon-python",
+ "tags": "builder,python",
+ "supports":"python:3.4,python",
+ "version": "3.4",
+ "sampleRepo": "https://github.com/openshift/django-ex.git"
+ },
+ "from": {
+ "Kind": "DockerImage",
+ "Name": "centos/python-34-centos7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "wildfly",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "from": {
+ "Kind": "ImageStreamTag",
+ "Name": "8.1"
+ }
+ },
+ {
+ "name": "8.1",
+ "annotations": {
+ "description": "Build and run Java applications on Wildfly 8.1",
+ "iconClass": "icon-wildfly",
+ "tags": "builder,wildfly,java",
+ "supports":"wildfly:8.1,jee,java",
+ "version": "8.1",
+ "sampleRepo": "https://github.com/bparees/openshift-jee-sample.git"
+ },
+ "from": {
+ "Kind": "DockerImage",
+ "Name": "openshift/wildfly-81-centos7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "mysql",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "from": {
+ "Kind": "ImageStreamTag",
+ "Name": "5.5"
+ }
+ },
+ {
+ "name": "5.5",
+ "from": {
+ "Kind": "DockerImage",
+ "Name": "openshift/mysql-55-centos7:latest"
+ }
+ },
+ {
+ "name": "5.6",
+ "from": {
+ "Kind": "DockerImage",
+ "Name": "centos/mysql-56-centos7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "postgresql",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "from": {
+ "Kind": "ImageStreamTag",
+ "Name": "9.2"
+ }
+ },
+ {
+ "name": "9.2",
+ "from": {
+ "Kind": "DockerImage",
+ "Name": "openshift/postgresql-92-centos7:latest"
+ }
+ },
+ {
+ "name": "9.4",
+ "from": {
+ "Kind": "DockerImage",
+ "Name": "centos/postgresql-94-centos7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "mongodb",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "from": {
+ "Kind": "ImageStreamTag",
+ "Name": "2.4"
+ }
+ },
+ {
+ "name": "2.4",
+ "from": {
+ "Kind": "DockerImage",
+ "Name": "openshift/mongodb-24-centos7:latest"
+ }
+ },
+ {
+ "name": "2.6",
+ "from": {
+ "Kind": "DockerImage",
+ "Name": "centos/mongodb-26-centos7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jenkins",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "from": {
+ "Kind": "ImageStreamTag",
+ "Name": "1"
+ }
+ },
+ {
+ "name": "1",
+ "from": {
+ "Kind": "DockerImage",
+ "Name": "openshift/jenkins-1-centos7:latest"
+ }
+ }
+ ]
+ }
+ }
+ ]
+}
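Image-stream definitions such as the file above are normally created in the shared openshift namespace so their tags are visible from every project. A minimal sketch, assuming a cluster-admin login:

    oc create -n openshift -f image-streams-centos7.json
    oc get imagestreams -n openshift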
diff --git a/roles/openshift_examples/files/examples/v1.1/image-streams/image-streams-rhel7.json b/roles/openshift_examples/files/examples/v1.1/image-streams/image-streams-rhel7.json
new file mode 100644
index 000000000..d2a8cfb1d
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.1/image-streams/image-streams-rhel7.json
@@ -0,0 +1,378 @@
+{
+ "kind": "ImageStreamList",
+ "apiVersion": "v1",
+ "metadata": {},
+ "items": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "ruby",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "from": {
+ "Kind": "ImageStreamTag",
+ "Name": "2.0"
+ }
+ },
+ {
+ "name": "2.0",
+ "annotations": {
+ "description": "Build and run Ruby 2.0 applications",
+ "iconClass": "icon-ruby",
+ "tags": "builder,ruby",
+ "supports": "ruby:2.0,ruby",
+ "version": "2.0",
+ "sampleRepo": "https://github.com/openshift/ruby-ex.git"
+ },
+ "from": {
+ "Kind": "DockerImage",
+ "Name": "registry.access.redhat.com/openshift3/ruby-20-rhel7:latest"
+ }
+ },
+ {
+ "name": "2.2",
+ "annotations": {
+ "description": "Build and run Ruby 2.2 applications",
+ "iconClass": "icon-ruby",
+ "tags": "builder,ruby",
+ "supports": "ruby:2.2,ruby",
+ "version": "2.2",
+ "sampleRepo": "https://github.com/openshift/ruby-ex.git"
+ },
+ "from": {
+ "Kind": "DockerImage",
+ "Name": "registry.access.redhat.com/rhscl/ruby-22-rhel7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "nodejs",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "from": {
+ "Kind": "ImageStreamTag",
+ "Name": "0.10"
+ }
+ },
+ {
+ "name": "0.10",
+ "annotations": {
+ "description": "Build and run NodeJS 0.10 applications",
+ "iconClass": "icon-nodejs",
+ "tags": "builder,nodejs",
+ "supports":"nodejs:0.10,nodejs:0.1,nodejs",
+ "version": "0.10",
+ "sampleRepo": "https://github.com/openshift/nodejs-ex.git"
+ },
+ "from": {
+ "Kind": "DockerImage",
+ "Name": "registry.access.redhat.com/openshift3/nodejs-010-rhel7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "perl",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "from": {
+ "Kind": "ImageStreamTag",
+ "Name": "5.16"
+ }
+ },
+ {
+ "name": "5.16",
+ "annotations": {
+ "description": "Build and run Perl 5.16 applications",
+ "iconClass": "icon-perl",
+ "tags": "builder,perl",
+ "supports":"perl:5.16,perl",
+ "version": "5.16",
+ "sampleRepo": "https://github.com/openshift/dancer-ex.git"
+ },
+ "from": {
+ "Kind": "DockerImage",
+ "Name": "registry.access.redhat.com/openshift3/perl-516-rhel7:latest"
+ }
+ },
+ {
+ "name": "5.20",
+ "annotations": {
+ "description": "Build and run Perl 5.20 applications",
+ "iconClass": "icon-perl",
+ "tags": "builder,perl",
+ "supports":"perl:5.20,perl",
+ "version": "5.20",
+ "sampleRepo": "https://github.com/openshift/dancer-ex.git"
+ },
+ "from": {
+ "Kind": "DockerImage",
+ "Name": "registry.access.redhat.com/rhscl/perl-520-rhel7:latest"
+ }
+
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "php",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "from": {
+ "Kind": "ImageStreamTag",
+ "Name": "5.5"
+ }
+ },
+ {
+ "name": "5.5",
+ "annotations": {
+ "description": "Build and run PHP 5.5 applications",
+ "iconClass": "icon-php",
+ "tags": "builder,php",
+ "supports":"php:5.5,php",
+ "version": "5.5",
+ "sampleRepo": "https://github.com/openshift/cakephp-ex.git"
+ },
+ "from": {
+ "Kind": "DockerImage",
+ "Name": "registry.access.redhat.com/openshift3/php-55-rhel7:latest"
+ }
+ },
+ {
+ "name": "5.6",
+ "annotations": {
+ "description": "Build and run PHP 5.6 applications",
+ "iconClass": "icon-php",
+ "tags": "builder,php",
+ "supports":"php:5.6,php",
+ "version": "5.6",
+ "sampleRepo": "https://github.com/openshift/cakephp-ex.git"
+ },
+ "from": {
+ "Kind": "DockerImage",
+ "Name": "registry.access.redhat.com/rhscl/php-56-rhel7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "python",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "from": {
+ "Kind": "ImageStreamTag",
+ "Name": "3.3"
+ }
+ },
+ {
+ "name": "3.3",
+ "annotations": {
+ "description": "Build and run Python 3.3 applications",
+ "iconClass": "icon-python",
+ "tags": "builder,python",
+ "supports":"python:3.3,python",
+ "version": "3.3",
+ "sampleRepo": "https://github.com/openshift/django-ex.git"
+ },
+ "from": {
+ "Kind": "DockerImage",
+ "Name": "registry.access.redhat.com/openshift3/python-33-rhel7:latest"
+ }
+ },
+ {
+ "name": "2.7",
+ "annotations": {
+ "description": "Build and run Python 2.7 applications",
+ "iconClass": "icon-python",
+ "tags": "builder,python",
+ "supports":"python:2.7,python",
+ "version": "2.7",
+ "sampleRepo": "https://github.com/openshift/django-ex.git"
+ },
+ "from": {
+ "Kind": "DockerImage",
+ "Name": "registry.access.redhat.com/rhscl/python-27-rhel7:latest"
+ }
+ },
+ {
+ "name": "3.4",
+ "annotations": {
+ "description": "Build and run Python 3.4 applications",
+ "iconClass": "icon-python",
+ "tags": "builder,python",
+ "supports":"python:3.4,python",
+ "version": "3.4",
+ "sampleRepo": "https://github.com/openshift/django-ex.git"
+ },
+ "from": {
+ "Kind": "DockerImage",
+ "Name": "registry.access.redhat.com/rhscl/python-34-rhel7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "mysql",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "from": {
+ "Kind": "ImageStreamTag",
+ "Name": "5.5"
+ }
+ },
+ {
+ "name": "5.5",
+ "from": {
+ "Kind": "DockerImage",
+ "Name": "registry.access.redhat.com/openshift3/mysql-55-rhel7:latest"
+ }
+ },
+ {
+ "name": "5.6",
+ "from": {
+ "Kind": "DockerImage",
+ "Name": "registry.access.redhat.com/rhscl/mysql-56-rhel7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "postgresql",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "from": {
+ "Kind": "ImageStreamTag",
+ "Name": "9.2"
+ }
+ },
+ {
+ "name": "9.2",
+ "from": {
+ "Kind": "DockerImage",
+ "Name": "registry.access.redhat.com/openshift3/postgresql-92-rhel7:latest"
+ }
+ },
+ {
+ "name": "9.4",
+ "from": {
+ "Kind": "DockerImage",
+ "Name": "registry.access.redhat.com/rhscl/postgresql-94-rhel7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "mongodb",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "from": {
+ "Kind": "ImageStreamTag",
+ "Name": "2.4"
+ }
+ },
+ {
+ "name": "2.4",
+ "from": {
+ "Kind": "DockerImage",
+ "Name": "registry.access.redhat.com/openshift3/mongodb-24-rhel7:latest"
+ }
+ },
+ {
+ "name": "2.6",
+ "from": {
+ "Kind": "DockerImage",
+ "Name": "registry.access.redhat.com/rhscl/mongodb-26-rhel7:latest"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jenkins",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "tags": [
+ {
+ "name": "latest",
+ "from": {
+ "Kind": "ImageStreamTag",
+ "Name": "1"
+ }
+ },
+ {
+ "name": "1",
+ "from": {
+ "Kind": "DockerImage",
+ "Name": "registry.access.redhat.com/openshift3/jenkins-1-rhel7:latest"
+ }
+ }
+ ]
+ }
+ }
+ ]
+}
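These image stream definitions only take effect once they are loaded into the shared "openshift" namespace. Below is a minimal sketch of how that could be done from an Ansible task, assuming the file has already been copied to the master and that the oc client is on the path; the file path and error handling are assumptions, and the actual openshift_examples role may do this differently.

    # Hypothetical task: the local path and "already exists" handling are assumptions.
    - name: Import the RHEL-based image streams into the openshift namespace
      command: oc create -f /tmp/image-streams.json -n openshift
      register: import_result
      failed_when: "import_result.rc != 0 and 'already exists' not in import_result.stderr"
      changed_when: import_result.rc == 0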
diff --git a/roles/openshift_examples/files/examples/v1.1/infrastructure-templates/enterprise/logging-deployer.yaml b/roles/openshift_examples/files/examples/v1.1/infrastructure-templates/enterprise/logging-deployer.yaml
new file mode 100644
index 000000000..b3b60bf9b
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.1/infrastructure-templates/enterprise/logging-deployer.yaml
@@ -0,0 +1,151 @@
+apiVersion: "v1"
+kind: "Template"
+metadata:
+ name: logging-deployer-template
+ annotations:
+ description: "Template for deploying everything needed for aggregated logging. Requires cluster-admin 'logging-deployer' service account and 'logging-deployer' secret."
+ tags: "infrastructure"
+labels:
+ logging-infra: deployer
+ provider: openshift
+ component: deployer
+objects:
+-
+ apiVersion: v1
+ kind: Pod
+ metadata:
+ generateName: logging-deployer-
+ spec:
+ containers:
+ - image: ${IMAGE_PREFIX}logging-deployment:${IMAGE_VERSION}
+ imagePullPolicy: Always
+ name: deployer
+ volumeMounts:
+ - name: secret
+ mountPath: /secret
+ readOnly: true
+ - name: empty
+ mountPath: /etc/deploy
+ env:
+ - name: PROJECT
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: IMAGE_PREFIX
+ value: ${IMAGE_PREFIX}
+ - name: IMAGE_VERSION
+ value: ${IMAGE_VERSION}
+ - name: ENABLE_OPS_CLUSTER
+ value: ${ENABLE_OPS_CLUSTER}
+ - name: KIBANA_HOSTNAME
+ value: ${KIBANA_HOSTNAME}
+ - name: KIBANA_OPS_HOSTNAME
+ value: ${KIBANA_OPS_HOSTNAME}
+ - name: PUBLIC_MASTER_URL
+ value: ${PUBLIC_MASTER_URL}
+ - name: MASTER_URL
+ value: ${MASTER_URL}
+ - name: ES_INSTANCE_RAM
+ value: ${ES_INSTANCE_RAM}
+ - name: ES_CLUSTER_SIZE
+ value: ${ES_CLUSTER_SIZE}
+ - name: ES_NODE_QUORUM
+ value: ${ES_NODE_QUORUM}
+ - name: ES_RECOVER_AFTER_NODES
+ value: ${ES_RECOVER_AFTER_NODES}
+ - name: ES_RECOVER_EXPECTED_NODES
+ value: ${ES_RECOVER_EXPECTED_NODES}
+ - name: ES_RECOVER_AFTER_TIME
+ value: ${ES_RECOVER_AFTER_TIME}
+ - name: ES_OPS_INSTANCE_RAM
+ value: ${ES_OPS_INSTANCE_RAM}
+ - name: ES_OPS_CLUSTER_SIZE
+ value: ${ES_OPS_CLUSTER_SIZE}
+ - name: ES_OPS_NODE_QUORUM
+ value: ${ES_OPS_NODE_QUORUM}
+ - name: ES_OPS_RECOVER_AFTER_NODES
+ value: ${ES_OPS_RECOVER_AFTER_NODES}
+ - name: ES_OPS_RECOVER_EXPECTED_NODES
+ value: ${ES_OPS_RECOVER_EXPECTED_NODES}
+ - name: ES_OPS_RECOVER_AFTER_TIME
+ value: ${ES_OPS_RECOVER_AFTER_TIME}
+ dnsPolicy: ClusterFirst
+ restartPolicy: Never
+ serviceAccount: logging-deployer
+ volumes:
+ - name: empty
+ emptyDir: {}
+ - name: secret
+ secret:
+ secretName: logging-deployer
+parameters:
+-
+ description: 'Specify prefix for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set prefix "openshift/origin-"'
+ name: IMAGE_PREFIX
+ value: "registry.access.redhat.com/openshift3/"
+-
+ description: 'Specify version for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set version "v1.1"'
+ name: IMAGE_VERSION
+ value: "3.1.0"
+-
+ description: "If true, set up to use a second ES cluster for ops logs."
+ name: ENABLE_OPS_CLUSTER
+ value: "false"
+-
+ description: "External hostname where clients will reach kibana"
+ name: KIBANA_HOSTNAME
+ required: true
+-
+ description: "External hostname at which admins will visit the ops Kibana."
+ name: KIBANA_OPS_HOSTNAME
+ value: kibana-ops.example.com
+-
+ description: "External URL for the master, for OAuth purposes"
+ name: PUBLIC_MASTER_URL
+ required: true
+-
+ description: "Internal URL for the master, for authentication retrieval"
+ name: MASTER_URL
+ value: "https://kubernetes.default.svc.cluster.local"
+-
+ description: "Amount of RAM to reserve per ElasticSearch instance."
+ name: ES_INSTANCE_RAM
+ value: "8G"
+-
+ description: "How many instances of ElasticSearch to deploy."
+ name: ES_CLUSTER_SIZE
+ required: true
+-
+ description: "Number of nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
+ name: ES_NODE_QUORUM
+-
+ description: "Number of nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_CLUSTER_SIZE."
+ name: ES_RECOVER_AFTER_NODES
+-
+ description: "Number of nodes desired to be present before the cluster will recover from a full restart. By default, ES_CLUSTER_SIZE."
+ name: ES_RECOVER_EXPECTED_NODES
+-
+ description: "Timeout for *expected* nodes to be present when cluster is recovering from a full restart."
+ name: ES_RECOVER_AFTER_TIME
+ value: "5m"
+-
+ description: "Amount of RAM to reserve per ops ElasticSearch instance."
+ name: ES_OPS_INSTANCE_RAM
+ value: "8G"
+-
+ description: "How many ops instances of ElasticSearch to deploy. By default, ES_CLUSTER_SIZE."
+ name: ES_OPS_CLUSTER_SIZE
+-
+ description: "Number of ops nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
+ name: ES_OPS_NODE_QUORUM
+-
+ description: "Number of ops nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_OPS_CLUSTER_SIZE."
+ name: ES_OPS_RECOVER_AFTER_NODES
+-
+ description: "Number of ops nodes desired to be present before the cluster will recover from a full restart. By default, ES_OPS_CLUSTER_SIZE."
+ name: ES_OPS_RECOVER_EXPECTED_NODES
+-
+ description: "Timeout for *expected* ops nodes to be present when cluster is recovering from a full restart."
+ name: ES_OPS_RECOVER_AFTER_TIME
+ value: "5m"
+
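A sketch of how this logging deployer template might be instantiated once the 'logging-deployer' service account and secret it requires are in place. The parameter names come from the template above; the oc invocation, project name, and hostnames are assumptions, the file is assumed to be in the working directory, and the parameter flag (-v here) is spelled differently in newer oc releases.

    # Hypothetical task; adjust hostnames and the target project to the cluster.
    - name: Launch the aggregated logging deployer pod
      shell: >
        oc process -f logging-deployer.yaml
        -v KIBANA_HOSTNAME=kibana.example.com
        -v PUBLIC_MASTER_URL=https://master.example.com:8443
        -v ES_CLUSTER_SIZE=1
        | oc create -n logging -f -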
diff --git a/roles/openshift_examples/files/examples/v1.1/infrastructure-templates/enterprise/metrics-deployer.yaml b/roles/openshift_examples/files/examples/v1.1/infrastructure-templates/enterprise/metrics-deployer.yaml
new file mode 100644
index 000000000..ddd9f2f75
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.1/infrastructure-templates/enterprise/metrics-deployer.yaml
@@ -0,0 +1,116 @@
+#!/bin/bash
+#
+# Copyright 2014-2015 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+apiVersion: "v1"
+kind: "Template"
+metadata:
+ name: metrics-deployer-template
+ annotations:
+ description: "Template for deploying the required Metrics integration. Requires cluster-admin 'metrics-deployer' service account and 'metrics-deployer' secret."
+ tags: "infrastructure"
+labels:
+ metrics-infra: deployer
+ provider: openshift
+ component: deployer
+objects:
+-
+ apiVersion: v1
+ kind: Pod
+ metadata:
+ generateName: metrics-deployer-
+ spec:
+ containers:
+ - image: ${IMAGE_PREFIX}metrics-deployer:${IMAGE_VERSION}
+ name: deployer
+ volumeMounts:
+ - name: secret
+ mountPath: /secret
+ readOnly: true
+ - name: empty
+ mountPath: /etc/deploy
+ env:
+ - name: PROJECT
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: IMAGE_PREFIX
+ value: ${IMAGE_PREFIX}
+ - name: IMAGE_VERSION
+ value: ${IMAGE_VERSION}
+ - name: PUBLIC_MASTER_URL
+ value: ${PUBLIC_MASTER_URL}
+ - name: MASTER_URL
+ value: ${MASTER_URL}
+ - name: REDEPLOY
+ value: ${REDEPLOY}
+ - name: USE_PERSISTENT_STORAGE
+ value: ${USE_PERSISTENT_STORAGE}
+ - name: HAWKULAR_METRICS_HOSTNAME
+ value: ${HAWKULAR_METRICS_HOSTNAME}
+ - name: CASSANDRA_NODES
+ value: ${CASSANDRA_NODES}
+ - name: CASSANDRA_PV_SIZE
+ value: ${CASSANDRA_PV_SIZE}
+ - name: METRIC_DURATION
+ value: ${METRIC_DURATION}
+ dnsPolicy: ClusterFirst
+ restartPolicy: Never
+ serviceAccount: metrics-deployer
+ volumes:
+ - name: empty
+ emptyDir: {}
+ - name: secret
+ secret:
+ secretName: metrics-deployer
+parameters:
+-
+ description: 'Specify prefix for metrics components; e.g. for "openshift/origin-metrics-deployer:v1.1", set prefix "openshift/origin-"'
+ name: IMAGE_PREFIX
+ value: "registry.access.redhat.com/openshift3/"
+-
+ description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:v1.1", set version "v1.1"'
+ name: IMAGE_VERSION
+ value: "3.1.0"
+-
+ description: "Internal URL for the master, for authentication retrieval"
+ name: MASTER_URL
+ value: "https://kubernetes.default.svc:443"
+-
+ description: "External hostname where clients will reach Hawkular Metrics"
+ name: HAWKULAR_METRICS_HOSTNAME
+ required: true
+-
+ description: "If set to true the deployer will try and delete all the existing components before trying to redeploy."
+ name: REDEPLOY
+ value: "false"
+-
+ description: "Set to true for persistent storage, set to false to use non persistent storage"
+ name: USE_PERSISTENT_STORAGE
+ value: "true"
+-
+ description: "The number of Cassandra Nodes to deploy for the initial cluster"
+ name: CASSANDRA_NODES
+ value: "1"
+-
+ description: "The persistent volume size for each of the Cassandra nodes"
+ name: CASSANDRA_PV_SIZE
+ value: "1Gi"
+-
+ description: "How many days metrics should be stored for."
+ name: METRIC_DURATION
+ value: "7"
diff --git a/roles/openshift_examples/files/examples/v1.1/infrastructure-templates/origin/logging-deployer.yaml b/roles/openshift_examples/files/examples/v1.1/infrastructure-templates/origin/logging-deployer.yaml
new file mode 100644
index 000000000..4c798e148
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.1/infrastructure-templates/origin/logging-deployer.yaml
@@ -0,0 +1,151 @@
+apiVersion: "v1"
+kind: "Template"
+metadata:
+ name: logging-deployer-template
+ annotations:
+ description: "Template for deploying everything needed for aggregated logging. Requires cluster-admin 'logging-deployer' service account and 'logging-deployer' secret."
+ tags: "infrastructure"
+labels:
+ logging-infra: deployer
+ provider: openshift
+ component: deployer
+objects:
+-
+ apiVersion: v1
+ kind: Pod
+ metadata:
+ generateName: logging-deployer-
+ spec:
+ containers:
+ - image: ${IMAGE_PREFIX}logging-deployment:${IMAGE_VERSION}
+ imagePullPolicy: Always
+ name: deployer
+ volumeMounts:
+ - name: secret
+ mountPath: /secret
+ readOnly: true
+ - name: empty
+ mountPath: /etc/deploy
+ env:
+ - name: PROJECT
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: IMAGE_PREFIX
+ value: ${IMAGE_PREFIX}
+ - name: IMAGE_VERSION
+ value: ${IMAGE_VERSION}
+ - name: ENABLE_OPS_CLUSTER
+ value: ${ENABLE_OPS_CLUSTER}
+ - name: KIBANA_HOSTNAME
+ value: ${KIBANA_HOSTNAME}
+ - name: KIBANA_OPS_HOSTNAME
+ value: ${KIBANA_OPS_HOSTNAME}
+ - name: PUBLIC_MASTER_URL
+ value: ${PUBLIC_MASTER_URL}
+ - name: MASTER_URL
+ value: ${MASTER_URL}
+ - name: ES_INSTANCE_RAM
+ value: ${ES_INSTANCE_RAM}
+ - name: ES_CLUSTER_SIZE
+ value: ${ES_CLUSTER_SIZE}
+ - name: ES_NODE_QUORUM
+ value: ${ES_NODE_QUORUM}
+ - name: ES_RECOVER_AFTER_NODES
+ value: ${ES_RECOVER_AFTER_NODES}
+ - name: ES_RECOVER_EXPECTED_NODES
+ value: ${ES_RECOVER_EXPECTED_NODES}
+ - name: ES_RECOVER_AFTER_TIME
+ value: ${ES_RECOVER_AFTER_TIME}
+ - name: ES_OPS_INSTANCE_RAM
+ value: ${ES_OPS_INSTANCE_RAM}
+ - name: ES_OPS_CLUSTER_SIZE
+ value: ${ES_OPS_CLUSTER_SIZE}
+ - name: ES_OPS_NODE_QUORUM
+ value: ${ES_OPS_NODE_QUORUM}
+ - name: ES_OPS_RECOVER_AFTER_NODES
+ value: ${ES_OPS_RECOVER_AFTER_NODES}
+ - name: ES_OPS_RECOVER_EXPECTED_NODES
+ value: ${ES_OPS_RECOVER_EXPECTED_NODES}
+ - name: ES_OPS_RECOVER_AFTER_TIME
+ value: ${ES_OPS_RECOVER_AFTER_TIME}
+ dnsPolicy: ClusterFirst
+ restartPolicy: Never
+ serviceAccount: logging-deployer
+ volumes:
+ - name: empty
+ emptyDir: {}
+ - name: secret
+ secret:
+ secretName: logging-deployer
+parameters:
+-
+ description: 'Specify prefix for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set prefix "openshift/origin-"'
+ name: IMAGE_PREFIX
+ value: "docker.io/openshift/origin-"
+-
+ description: 'Specify version for logging components; e.g. for "openshift/origin-logging-deployer:v1.1", set version "v1.1"'
+ name: IMAGE_VERSION
+ value: "latest"
+-
+ description: "If true, set up to use a second ES cluster for ops logs."
+ name: ENABLE_OPS_CLUSTER
+ value: "false"
+-
+ description: "External hostname where clients will reach kibana"
+ name: KIBANA_HOSTNAME
+ required: true
+-
+ description: "External hostname at which admins will visit the ops Kibana."
+ name: KIBANA_OPS_HOSTNAME
+ value: kibana-ops.example.com
+-
+ description: "External URL for the master, for OAuth purposes"
+ name: PUBLIC_MASTER_URL
+ required: true
+-
+ description: "Internal URL for the master, for authentication retrieval"
+ name: MASTER_URL
+ value: "https://kubernetes.default.svc.cluster.local"
+-
+ description: "Amount of RAM to reserve per ElasticSearch instance."
+ name: ES_INSTANCE_RAM
+ value: "8G"
+-
+ description: "How many instances of ElasticSearch to deploy."
+ name: ES_CLUSTER_SIZE
+ required: true
+-
+ description: "Number of nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
+ name: ES_NODE_QUORUM
+-
+ description: "Number of nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_CLUSTER_SIZE."
+ name: ES_RECOVER_AFTER_NODES
+-
+ description: "Number of nodes desired to be present before the cluster will recover from a full restart. By default, ES_CLUSTER_SIZE."
+ name: ES_RECOVER_EXPECTED_NODES
+-
+ description: "Timeout for *expected* nodes to be present when cluster is recovering from a full restart."
+ name: ES_RECOVER_AFTER_TIME
+ value: "5m"
+-
+ description: "Amount of RAM to reserve per ops ElasticSearch instance."
+ name: ES_OPS_INSTANCE_RAM
+ value: "8G"
+-
+ description: "How many ops instances of ElasticSearch to deploy. By default, ES_CLUSTER_SIZE."
+ name: ES_OPS_CLUSTER_SIZE
+-
+ description: "Number of ops nodes required to elect a master (ES minimum_master_nodes). By default, derived from ES_CLUSTER_SIZE / 2 + 1."
+ name: ES_OPS_NODE_QUORUM
+-
+ description: "Number of ops nodes required to be present before the cluster will recover from a full restart. By default, one fewer than ES_OPS_CLUSTER_SIZE."
+ name: ES_OPS_RECOVER_AFTER_NODES
+-
+ description: "Number of ops nodes desired to be present before the cluster will recover from a full restart. By default, ES_OPS_CLUSTER_SIZE."
+ name: ES_OPS_RECOVER_EXPECTED_NODES
+-
+ description: "Timeout for *expected* ops nodes to be present when cluster is recovering from a full restart."
+ name: ES_OPS_RECOVER_AFTER_TIME
+ value: "5m"
+
diff --git a/roles/openshift_examples/files/examples/infrastructure-templates/origin/metrics-deployer.yaml b/roles/openshift_examples/files/examples/v1.1/infrastructure-templates/origin/metrics-deployer.yaml
index d823b2587..3e9bcde5b 100644
--- a/roles/openshift_examples/files/examples/infrastructure-templates/origin/metrics-deployer.yaml
+++ b/roles/openshift_examples/files/examples/v1.1/infrastructure-templates/origin/metrics-deployer.yaml
@@ -81,11 +81,11 @@ parameters:
-
description: 'Specify prefix for metrics components; e.g. for "openshift/origin-metrics-deployer:v1.1", set prefix "openshift/origin-"'
name: IMAGE_PREFIX
- value: "hawkular/"
+ value: "docker.io/openshift/origin-"
-
description: 'Specify version for metrics components; e.g. for "openshift/origin-metrics-deployer:v1.1", set version "v1.1"'
name: IMAGE_VERSION
- value: "0.7.0-SNAPSHOT"
+ value: "latest"
-
description: "Internal URL for the master, for authentication retrieval"
name: MASTER_URL
diff --git a/roles/openshift_examples/files/examples/v1.1/quickstart-templates/cakephp-mysql.json b/roles/openshift_examples/files/examples/v1.1/quickstart-templates/cakephp-mysql.json
new file mode 100644
index 000000000..da5679444
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.1/quickstart-templates/cakephp-mysql.json
@@ -0,0 +1,378 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "cakephp-mysql-example",
+ "annotations": {
+ "description": "An example CakePHP application with a MySQL database",
+ "tags": "instant-app,php,cakephp,mysql",
+ "iconClass": "icon-php"
+ }
+ },
+ "labels": {
+ "template": "cakephp-mysql-example"
+ },
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "cakephp-mysql-example",
+ "annotations": {
+ "description": "Exposes and load balances the application pods"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "web",
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "name": "cakephp-mysql-example"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "cakephp-mysql-example"
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "kind": "Service",
+ "name": "cakephp-mysql-example"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "cakephp-mysql-example",
+ "annotations": {
+ "description": "Keeps track of changes in the application image"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "cakephp-mysql-example",
+ "annotations": {
+ "description": "Defines how to build the application"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "openshift",
+ "name": "php:5.5"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "cakephp-mysql-example:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange"
+ },
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "cakephp-mysql-example",
+ "annotations": {
+ "description": "Defines how to deploy the application server"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Rolling",
+ "recreateParams": {
+ "pre": {
+ "failurePolicy": "Abort",
+ "execNewPod": {
+ "command": [
+ "./migrate-database.sh"
+ ],
+ "containerName": "cakephp-mysql-example"
+ }
+ }
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "cakephp-mysql-example"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "cakephp-mysql-example:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "cakephp-mysql-example"
+ },
+ "template": {
+ "metadata": {
+ "name": "cakephp-mysql-example",
+ "labels": {
+ "name": "cakephp-mysql-example"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "cakephp-mysql-example",
+ "image": "cakephp-mysql-example",
+ "ports": [
+ {
+ "containerPort": 8080
+ }
+ ],
+ "env": [
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "value": "${DATABASE_SERVICE_NAME}"
+ },
+ {
+ "name": "DATABASE_ENGINE",
+ "value": "${DATABASE_ENGINE}"
+ },
+ {
+ "name": "DATABASE_NAME",
+ "value": "${DATABASE_NAME}"
+ },
+ {
+ "name": "DATABASE_USER",
+ "value": "${DATABASE_USER}"
+ },
+ {
+ "name": "DATABASE_PASSWORD",
+ "value": "${DATABASE_PASSWORD}"
+ },
+ {
+ "name": "CAKEPHP_SECRET_TOKEN",
+ "value": "${CAKEPHP_SECRET_TOKEN}"
+ },
+ {
+ "name": "CAKEPHP_SECURITY_SALT",
+ "value": "${CAKEPHP_SECURITY_SALT}"
+ },
+ {
+ "name": "CAKEPHP_SECURITY_CIPHER_SEED",
+ "value": "${CAKEPHP_SECURITY_CIPHER_SEED}"
+ },
+ {
+ "name": "OPCACHE_REVALIDATE_FREQ",
+ "value": "${OPCACHE_REVALIDATE_FREQ}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "description": "Exposes the database server"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "mysql",
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "description": "Defines how to deploy the database"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "labels": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "mysql",
+ "image": "${MYSQL_IMAGE}",
+ "ports": [
+ {
+ "containerPort": 3306
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DATABASE_USER}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DATABASE_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DATABASE_NAME}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ],
+ "parameters": [
+ {
+ "name": "SOURCE_REPOSITORY_URL",
+ "description": "The URL of the repository with your application source code",
+ "value": "https://github.com/openshift/cakephp-ex.git"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_REF",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch"
+ },
+ {
+ "name": "CONTEXT_DIR",
+ "description": "Set this to the relative path to your project if it is not in the root of your repository"
+ },
+ {
+ "name": "APPLICATION_DOMAIN",
+ "description": "The exposed hostname that will route to the CakePHP service, if left blank a value will be defaulted.",
+ "value": ""
+ },
+ {
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "description": "A secret string used to configure the GitHub webhook",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "description": "Database service name",
+ "value": "mysql"
+ },
+ {
+ "name": "DATABASE_ENGINE",
+ "description": "Database engine: postgresql, mysql or sqlite (default)",
+ "value": "mysql"
+ },
+ {
+ "name": "DATABASE_NAME",
+ "description": "Database name",
+ "value": "default"
+ },
+ {
+ "name": "DATABASE_USER",
+ "description": "Database user name",
+ "value": "cakephp"
+ },
+ {
+ "name": "DATABASE_PASSWORD",
+ "description": "Database user password",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}"
+ },
+ {
+ "name": "MYSQL_IMAGE",
+ "description": "Image to use for mysql",
+ "value": "openshift/mysql-55-centos7"
+ },
+ {
+ "name": "CAKEPHP_SECRET_TOKEN",
+ "description": "Set this to a long random string",
+ "generate": "expression",
+ "from": "[\\w]{50}"
+ },
+ {
+ "name": "CAKEPHP_SECURITY_SALT",
+ "description": "Security salt for session hash",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CAKEPHP_SECURITY_CIPHER_SEED",
+ "description": "Security cipher seed for session hash",
+ "generate": "expression",
+ "from": "[0-9]{30}"
+ },
+ {
+ "name": "OPCACHE_REVALIDATE_FREQ",
+ "description": "The How often to check script timestamps for updates, in seconds. 0 will result in OPcache checking for updates on every request.",
+ "value": "2"
+ }
+ ]
+}
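With the php image stream and this template loaded into the "openshift" namespace, the quickstart can be instantiated with selective parameter overrides; parameters declared with "generate": "expression" (the webhook secret, database password, and CakePHP secrets) are filled in with random strings matching their patterns when left unset. A sketch follows, with the project name and overrides as placeholders.

    # Hypothetical task; 'demo' is a placeholder project.
    - name: Instantiate the CakePHP + MySQL quickstart
      command: >
        oc new-app cakephp-mysql-example
        -p DATABASE_USER=cakephp
        -p DATABASE_NAME=default
        -n demo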
diff --git a/roles/openshift_examples/files/examples/v1.1/quickstart-templates/cakephp.json b/roles/openshift_examples/files/examples/v1.1/quickstart-templates/cakephp.json
new file mode 100644
index 000000000..f426e1dd6
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.1/quickstart-templates/cakephp.json
@@ -0,0 +1,275 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "cakephp-example",
+ "annotations": {
+ "description": "An example CakePHP application with no database",
+ "tags": "instant-app,php,cakephp",
+ "iconClass": "icon-php"
+ }
+ },
+ "labels": {
+ "template": "cakephp-example"
+ },
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "cakephp-example",
+ "annotations": {
+ "description": "Exposes and load balances the application pods"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "web",
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "name": "cakephp-example"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "cakephp-example"
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "kind": "Service",
+ "name": "cakephp-example"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "cakephp-example",
+ "annotations": {
+ "description": "Keeps track of changes in the application image"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "cakephp-example",
+ "annotations": {
+ "description": "Defines how to build the application"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "openshift",
+ "name": "php:5.5"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "cakephp-example:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange"
+ },
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "cakephp-example",
+ "annotations": {
+ "description": "Defines how to deploy the application server"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Rolling"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "cakephp-example"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "cakephp-example:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "cakephp-example"
+ },
+ "template": {
+ "metadata": {
+ "name": "cakephp-example",
+ "labels": {
+ "name": "cakephp-example"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "cakephp-example",
+ "image": "cakephp-example",
+ "ports": [
+ {
+ "containerPort": 8080
+ }
+ ],
+ "env": [
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "value": "${DATABASE_SERVICE_NAME}"
+ },
+ {
+ "name": "DATABASE_ENGINE",
+ "value": "${DATABASE_ENGINE}"
+ },
+ {
+ "name": "DATABASE_NAME",
+ "value": "${DATABASE_NAME}"
+ },
+ {
+ "name": "DATABASE_USER",
+ "value": "${DATABASE_USER}"
+ },
+ {
+ "name": "DATABASE_PASSWORD",
+ "value": "${DATABASE_PASSWORD}"
+ },
+ {
+ "name": "CAKEPHP_SECRET_TOKEN",
+ "value": "${CAKEPHP_SECRET_TOKEN}"
+ },
+ {
+ "name": "CAKEPHP_SECURITY_SALT",
+ "value": "${CAKEPHP_SECURITY_SALT}"
+ },
+ {
+ "name": "CAKEPHP_SECURITY_CIPHER_SEED",
+ "value": "${CAKEPHP_SECURITY_CIPHER_SEED}"
+ },
+ {
+ "name": "OPCACHE_REVALIDATE_FREQ",
+ "value": "${OPCACHE_REVALIDATE_FREQ}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ],
+ "parameters": [
+ {
+ "name": "SOURCE_REPOSITORY_URL",
+ "description": "The URL of the repository with your application source code",
+ "value": "https://github.com/openshift/cakephp-ex.git"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_REF",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch"
+ },
+ {
+ "name": "CONTEXT_DIR",
+ "description": "Set this to the relative path to your project if it is not in the root of your repository"
+ },
+ {
+ "name": "APPLICATION_DOMAIN",
+ "description": "The exposed hostname that will route to the CakePHP service, if left blank a value will be defaulted.",
+ "value": ""
+ },
+ {
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "description": "A secret string used to configure the GitHub webhook",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "description": "Database service name"
+ },
+ {
+ "name": "DATABASE_ENGINE",
+ "description": "Database engine: postgresql, mysql or sqlite (default)"
+ },
+ {
+ "name": "DATABASE_NAME",
+ "description": "Database name"
+ },
+ {
+ "name": "DATABASE_USER",
+ "description": "Database user name"
+ },
+ {
+ "name": "DATABASE_PASSWORD",
+ "description": "Database user password"
+ },
+ {
+ "name": "CAKEPHP_SECRET_TOKEN",
+ "description": "Set this to a long random string",
+ "generate": "expression",
+ "from": "[\\w]{50}"
+ },
+ {
+ "name": "CAKEPHP_SECURITY_SALT",
+ "description": "Security salt for session hash",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "CAKEPHP_SECURITY_CIPHER_SEED",
+ "description": "Security cipher seed for session hash",
+ "generate": "expression",
+ "from": "[0-9]{30}"
+ },
+ {
+ "name": "OPCACHE_REVALIDATE_FREQ",
+ "description": "The How often to check script timestamps for updates, in seconds. 0 will result in OPcache checking for updates on every request.",
+ "value": "2"
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.1/quickstart-templates/dancer-mysql.json b/roles/openshift_examples/files/examples/v1.1/quickstart-templates/dancer-mysql.json
new file mode 100644
index 000000000..55f655102
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.1/quickstart-templates/dancer-mysql.json
@@ -0,0 +1,348 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "dancer-mysql-example",
+ "annotations": {
+ "description": "An example Dancer application with a MySQL database",
+ "tags": "instant-app,perl,dancer,mysql",
+ "iconClass": "icon-perl"
+ }
+ },
+ "labels": {
+ "template": "dancer-mysql-example"
+ },
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "dancer-mysql-example",
+ "annotations": {
+ "description": "Exposes and load balances the application pods"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "web",
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "name": "dancer-mysql-example"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "dancer-mysql-example"
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "kind": "Service",
+ "name": "dancer-mysql-example"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "dancer-mysql-example",
+ "annotations": {
+ "description": "Keeps track of changes in the application image"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "dancer-mysql-example",
+ "annotations": {
+ "description": "Defines how to build the application"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "openshift",
+ "name": "perl:5.16"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "dancer-mysql-example:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange"
+ },
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "dancer-mysql-example",
+ "annotations": {
+ "description": "Defines how to deploy the application server"
+ }
+ },
+ "spec": {
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "dancer-mysql-example"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "dancer-mysql-example:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "dancer-mysql-example"
+ },
+ "template": {
+ "metadata": {
+ "name": "dancer-mysql-example",
+ "labels": {
+ "name": "dancer-mysql-example"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "dancer-mysql-example",
+ "image": "dancer-mysql-example",
+ "ports": [
+ {
+ "containerPort": 8080
+ }
+ ],
+ "env": [
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "value": "${DATABASE_SERVICE_NAME}"
+ },
+ {
+ "name": "MYSQL_USER",
+ "value": "${DATABASE_USER}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DATABASE_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DATABASE_NAME}"
+ },
+ {
+ "name": "SECRET_KEY_BASE",
+ "value": "${SECRET_KEY_BASE}"
+ },
+ {
+ "name": "PERL_APACHE2_RELOAD",
+ "value": "${PERL_APACHE2_RELOAD}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "description": "Exposes the database server"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "mysql",
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "description": "Defines how to deploy the database"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "labels": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "mysql",
+ "image": "${MYSQL_IMAGE}",
+ "ports": [
+ {
+ "containerPort": 3306
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DATABASE_USER}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DATABASE_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DATABASE_NAME}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ],
+ "parameters": [
+ {
+ "name": "SOURCE_REPOSITORY_URL",
+ "description": "The URL of the repository with your application source code",
+ "value": "https://github.com/openshift/dancer-ex.git"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_REF",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch"
+ },
+ {
+ "name": "CONTEXT_DIR",
+ "description": "Set this to the relative path to your project if it is not in the root of your repository"
+ },
+ {
+ "name": "APPLICATION_DOMAIN",
+ "description": "The exposed hostname that will route to the Dancer service, if left blank a value will be defaulted.",
+ "value": ""
+ },
+ {
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "description": "A secret string used to configure the GitHub webhook",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "ADMIN_USERNAME",
+ "description": "administrator username",
+ "generate": "expression",
+ "from": "admin[A-Z0-9]{3}"
+ },
+ {
+ "name": "ADMIN_PASSWORD",
+ "description": "administrator password",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{8}"
+ },
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "description": "Database service name",
+ "value": "database"
+ },
+ {
+ "name": "DATABASE_USER",
+ "description": "database username",
+ "generate": "expression",
+ "from": "user[A-Z0-9]{3}"
+ },
+ {
+ "name": "DATABASE_PASSWORD",
+ "description": "database password",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{8}"
+ },
+ {
+ "name": "DATABASE_NAME",
+ "description": "database name",
+ "value": "sampledb"
+ },
+ {
+ "name": "MYSQL_IMAGE",
+ "description": "Image to use for mysql",
+ "value": "openshift/mysql-55-centos7"
+ },
+ {
+ "name": "PERL_APACHE2_RELOAD",
+ "description": "Set this to \"true\" to enable automatic reloading of modified Perl modules",
+ "value": ""
+ },
+ {
+ "name": "SECRET_KEY_BASE",
+ "description": "Your secret key for verifying the integrity of signed cookies",
+ "generate": "expression",
+ "from": "[a-z0-9]{127}"
+ }
+ ]
+}
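Besides the GitHub webhook trigger declared in the BuildConfig above, a build can be started by hand, which is useful when testing a new SOURCE_REPOSITORY_REF. A sketch, with the project name as a placeholder.

    # Hypothetical task; assumes the quickstart has already been instantiated.
    - name: Trigger a rebuild of the Dancer + MySQL example
      command: oc start-build dancer-mysql-example -n demo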
diff --git a/roles/openshift_examples/files/examples/v1.1/quickstart-templates/dancer.json b/roles/openshift_examples/files/examples/v1.1/quickstart-templates/dancer.json
new file mode 100644
index 000000000..3ee19be83
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.1/quickstart-templates/dancer.json
@@ -0,0 +1,211 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "dancer-example",
+ "annotations": {
+ "description": "An example Dancer application with no database",
+ "tags": "instant-app,perl,dancer",
+ "iconClass": "icon-perl"
+ }
+ },
+ "labels": {
+ "template": "dancer-example"
+ },
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "dancer-example",
+ "annotations": {
+ "description": "Exposes and load balances the application pods"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "web",
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "name": "dancer-example"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "dancer-example"
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "kind": "Service",
+ "name": "dancer-example"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "dancer-example",
+ "annotations": {
+ "description": "Keeps track of changes in the application image"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "dancer-example",
+ "annotations": {
+ "description": "Defines how to build the application"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "openshift",
+ "name": "perl:5.16"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "dancer-example:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange"
+ },
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "dancer-example",
+ "annotations": {
+ "description": "Defines how to deploy the application server"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Rolling"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "dancer-example"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "dancer-example:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "dancer-example"
+ },
+ "template": {
+ "metadata": {
+ "name": "dancer-example",
+ "labels": {
+ "name": "dancer-example"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "dancer-example",
+ "image": "dancer-example",
+ "ports": [
+ {
+ "containerPort": 8080
+ }
+ ],
+ "env": [
+ {
+ "name": "PERL_APACHE2_RELOAD",
+ "value": "${PERL_APACHE2_RELOAD}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ],
+ "parameters": [
+ {
+ "name": "SOURCE_REPOSITORY_URL",
+ "description": "The URL of the repository with your application source code",
+ "value": "https://github.com/openshift/dancer-ex.git"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_REF",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch"
+ },
+ {
+ "name": "CONTEXT_DIR",
+ "description": "Set this to the relative path to your project if it is not in the root of your repository"
+ },
+ {
+ "name": "APPLICATION_DOMAIN",
+ "description": "The exposed hostname that will route to the Dancer service, if left blank a value will be defaulted.",
+ "value": ""
+ },
+ {
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "description": "A secret string used to configure the GitHub webhook",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "SECRET_KEY_BASE",
+ "description": "Your secret key for verifying the integrity of signed cookies",
+ "generate": "expression",
+ "from": "[a-z0-9]{127}"
+ },
+ {
+ "name": "PERL_APACHE2_RELOAD",
+ "description": "Set this to \"true\" to enable automatic reloading of modified Perl modules",
+ "value": ""
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.1/quickstart-templates/django-postgresql.json b/roles/openshift_examples/files/examples/v1.1/quickstart-templates/django-postgresql.json
new file mode 100644
index 000000000..749064e98
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.1/quickstart-templates/django-postgresql.json
@@ -0,0 +1,346 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "django-psql-example",
+ "annotations": {
+ "description": "An example Django application with a PostgreSQL database",
+ "tags": "instant-app,python,django,postgresql",
+ "iconClass": "icon-python"
+ }
+ },
+ "labels": {
+ "template": "django-psql-example"
+ },
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "django-psql-example",
+ "annotations": {
+ "description": "Exposes and load balances the application pods"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "web",
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "name": "django-psql-example"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "django-psql-example"
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "kind": "Service",
+ "name": "django-psql-example"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "django-psql-example",
+ "annotations": {
+ "description": "Keeps track of changes in the application image"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "django-psql-example",
+ "annotations": {
+ "description": "Defines how to build the application"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "openshift",
+ "name": "python:3.3"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "django-psql-example:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange"
+ },
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "django-psql-example",
+ "annotations": {
+ "description": "Defines how to deploy the application server"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Rolling"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "django-psql-example"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "django-psql-example:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "django-psql-example"
+ },
+ "template": {
+ "metadata": {
+ "name": "django-psql-example",
+ "labels": {
+ "name": "django-psql-example"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "django-psql-example",
+ "image": "django-psql-example",
+ "ports": [
+ {
+ "containerPort": 8080
+ }
+ ],
+ "env": [
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "value": "${DATABASE_SERVICE_NAME}"
+ },
+ {
+ "name": "DATABASE_ENGINE",
+ "value": "${DATABASE_ENGINE}"
+ },
+ {
+ "name": "DATABASE_NAME",
+ "value": "${DATABASE_NAME}"
+ },
+ {
+ "name": "DATABASE_USER",
+ "value": "${DATABASE_USER}"
+ },
+ {
+ "name": "DATABASE_PASSWORD",
+ "value": "${DATABASE_PASSWORD}"
+ },
+ {
+ "name": "APP_CONFIG",
+ "value": "${APP_CONFIG}"
+ },
+ {
+ "name": "DJANGO_SECRET_KEY",
+ "value": "${DJANGO_SECRET_KEY}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "description": "Exposes the database server"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "postgresql",
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "description": "Defines how to deploy the database"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "labels": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "postgresql",
+ "image": "${POSTGRESQL_IMAGE}",
+ "ports": [
+ {
+ "containerPort": 5432
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DATABASE_USER}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DATABASE_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DATABASE_NAME}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ],
+ "parameters": [
+ {
+ "name": "SOURCE_REPOSITORY_URL",
+ "description": "The URL of the repository with your application source code",
+ "value": "https://github.com/openshift/django-ex.git"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_REF",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch"
+ },
+ {
+ "name": "CONTEXT_DIR",
+ "description": "Set this to the relative path to your project if it is not in the root of your repository"
+ },
+ {
+ "name": "APPLICATION_DOMAIN",
+ "description": "The exposed hostname that will route to the Django service, if left blank a value will be defaulted.",
+ "value": ""
+ },
+ {
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "description": "A secret string used to configure the GitHub webhook",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "description": "Database service name",
+ "value": "postgresql"
+ },
+ {
+ "name": "DATABASE_ENGINE",
+ "description": "Database engine: postgresql, mysql or sqlite (default)",
+ "value": "postgresql"
+ },
+ {
+ "name": "DATABASE_NAME",
+ "description": "Database name",
+ "value": "default"
+ },
+ {
+ "name": "DATABASE_USER",
+ "description": "Database user name",
+ "value": "django"
+ },
+ {
+ "name": "DATABASE_PASSWORD",
+ "description": "Database user password",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}"
+ },
+ {
+ "name": "POSTGRESQL_IMAGE",
+ "description": "Image to use for postgresql",
+ "value": "openshift/postgresql-92-centos7"
+ },
+ {
+ "name": "APP_CONFIG",
+ "description": "Relative path to Gunicorn configuration file (optional)"
+ },
+ {
+ "name": "DJANGO_SECRET_KEY",
+ "description": "Set this to a long random string",
+ "generate": "expression",
+ "from": "[\\w]{50}"
+ }
+ ]
+}
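Because the frontend DeploymentConfig carries both ImageChange and ConfigChange triggers, each successful build rolls out automatically; the replica count can still be changed independently of the template's default of 1. A sketch, assuming an oc release that can scale deploymentconfigs and using a placeholder project name.

    # Hypothetical task; scales only the Django pods, not PostgreSQL.
    - name: Scale the Django frontend to two replicas
      command: oc scale dc/django-psql-example --replicas=2 -n demo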
diff --git a/roles/openshift_examples/files/examples/v1.1/quickstart-templates/django.json b/roles/openshift_examples/files/examples/v1.1/quickstart-templates/django.json
new file mode 100644
index 000000000..143a942ab
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.1/quickstart-templates/django.json
@@ -0,0 +1,254 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "django-example",
+ "annotations": {
+ "description": "An example Django application with no database",
+ "tags": "instant-app,python,django",
+ "iconClass": "icon-python"
+ }
+ },
+ "labels": {
+ "template": "django-example"
+ },
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "django-example",
+ "annotations": {
+ "description": "Exposes and load balances the application pods"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "web",
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "name": "django-example"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "django-example"
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "kind": "Service",
+ "name": "django-example"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "django-example",
+ "annotations": {
+ "description": "Keeps track of changes in the application image"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "django-example",
+ "annotations": {
+ "description": "Defines how to build the application"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "openshift",
+ "name": "python:3.3"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "django-example:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange"
+ },
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "django-example",
+ "annotations": {
+ "description": "Defines how to deploy the application server"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Rolling"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "django-example"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "django-example:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "django-example"
+ },
+ "template": {
+ "metadata": {
+ "name": "django-example",
+ "labels": {
+ "name": "django-example"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "django-example",
+ "image": "django-example",
+ "ports": [
+ {
+ "containerPort": 8080
+ }
+ ],
+ "env": [
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "value": "${DATABASE_SERVICE_NAME}"
+ },
+ {
+ "name": "DATABASE_ENGINE",
+ "value": "${DATABASE_ENGINE}"
+ },
+ {
+ "name": "DATABASE_NAME",
+ "value": "${DATABASE_NAME}"
+ },
+ {
+ "name": "DATABASE_USER",
+ "value": "${DATABASE_USER}"
+ },
+ {
+ "name": "DATABASE_PASSWORD",
+ "value": "${DATABASE_PASSWORD}"
+ },
+ {
+ "name": "APP_CONFIG",
+ "value": "${APP_CONFIG}"
+ },
+ {
+ "name": "DJANGO_SECRET_KEY",
+ "value": "${DJANGO_SECRET_KEY}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ],
+ "parameters": [
+ {
+ "name": "SOURCE_REPOSITORY_URL",
+ "description": "The URL of the repository with your application source code",
+ "value": "https://github.com/openshift/django-ex.git"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_REF",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch"
+ },
+ {
+ "name": "CONTEXT_DIR",
+ "description": "Set this to the relative path to your project if it is not in the root of your repository"
+ },
+ {
+ "name": "APPLICATION_DOMAIN",
+ "description": "The exposed hostname that will route to the Django service, if left blank a value will be defaulted.",
+ "value": ""
+ },
+ {
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "description": "A secret string used to configure the GitHub webhook",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "description": "Database service name"
+ },
+ {
+ "name": "DATABASE_ENGINE",
+ "description": "Database engine: postgresql, mysql or sqlite (default)"
+ },
+ {
+ "name": "DATABASE_NAME",
+ "description": "Database name"
+ },
+ {
+ "name": "DATABASE_USER",
+ "description": "Database user name"
+ },
+ {
+ "name": "DATABASE_PASSWORD",
+ "description": "Database user password"
+ },
+ {
+ "name": "APP_CONFIG",
+ "description": "Relative path to Gunicorn configuration file (optional)"
+ },
+ {
+ "name": "DJANGO_SECRET_KEY",
+ "description": "Set this to a long random string",
+ "generate": "expression",
+ "from": "[\\w]{50}"
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.1/quickstart-templates/jenkins-ephemeral-template.json b/roles/openshift_examples/files/examples/v1.1/quickstart-templates/jenkins-ephemeral-template.json
new file mode 100644
index 000000000..14bd032af
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.1/quickstart-templates/jenkins-ephemeral-template.json
@@ -0,0 +1,150 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jenkins-ephemeral",
+ "creationTimestamp": null,
+ "annotations": {
+ "description": "Jenkins service, without persistent storage. WARNING: Any data stored will be lost upon pod destruction. Only use this template for testing",
+ "iconClass": "icon-jenkins",
+ "tags": "database,jenkins"
+ }
+ },
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${JENKINS_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "web",
+ "protocol": "TCP",
+ "port": 8080,
+ "targetPort": 8080,
+ "nodePort": 0
+ }
+ ],
+ "selector": {
+ "name": "${JENKINS_SERVICE_NAME}"
+ },
+ "portalIP": "",
+ "type": "ClusterIP",
+ "sessionAffinity": "None"
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jenkins",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "to": {
+ "kind": "Service",
+ "name": "${JENKINS_SERVICE_NAME}"
+ },
+ "tls": {
+ "termination": "edge",
+ "certificate": "-----BEGIN CERTIFICATE-----\nMIIDIjCCAgqgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBoTELMAkGA1UEBhMCVVMx\nCzAJBgNVBAgMAlNDMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0Rl\nZmF1bHQgQ29tcGFueSBMdGQxEDAOBgNVBAsMB1Rlc3QgQ0ExGjAYBgNVBAMMEXd3\ndy5leGFtcGxlY2EuY29tMSIwIAYJKoZIhvcNAQkBFhNleGFtcGxlQGV4YW1wbGUu\nY29tMB4XDTE1MDExMjE0MTk0MVoXDTE2MDExMjE0MTk0MVowfDEYMBYGA1UEAwwP\nd3d3LmV4YW1wbGUuY29tMQswCQYDVQQIDAJTQzELMAkGA1UEBhMCVVMxIjAgBgkq\nhkiG9w0BCQEWE2V4YW1wbGVAZXhhbXBsZS5jb20xEDAOBgNVBAoMB0V4YW1wbGUx\nEDAOBgNVBAsMB0V4YW1wbGUwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMrv\ngu6ZTTefNN7jjiZbS/xvQjyXjYMN7oVXv76jbX8gjMOmg9m0xoVZZFAE4XyQDuCm\n47VRx5Qrf/YLXmB2VtCFvB0AhXr5zSeWzPwaAPrjA4ebG+LUo24ziS8KqNxrFs1M\nmNrQUgZyQC6XIe1JHXc9t+JlL5UZyZQC1IfaJulDAgMBAAGjDTALMAkGA1UdEwQC\nMAAwDQYJKoZIhvcNAQEFBQADggEBAFCi7ZlkMnESvzlZCvv82Pq6S46AAOTPXdFd\nTMvrh12E1sdVALF1P1oYFJzG1EiZ5ezOx88fEDTW+Lxb9anw5/KJzwtWcfsupf1m\nV7J0D3qKzw5C1wjzYHh9/Pz7B1D0KthQRATQCfNf8s6bbFLaw/dmiIUhHLtIH5Qc\nyfrejTZbOSP77z8NOWir+BWWgIDDB2//3AkDIQvT20vmkZRhkqSdT7et4NmXOX/j\njhPti4b2Fie0LeuvgaOdKjCpQQNrYthZHXeVlOLRhMTSk3qUczenkKTOhvP7IS9q\n+Dzv5hqgSfvMG392KWh5f8xXfJNs4W5KLbZyl901MeReiLrPH3w=\n-----END CERTIFICATE-----",
+ "key": "-----BEGIN PRIVATE KEY-----\nMIICeAIBADANBgkqhkiG9w0BAQEFAASCAmIwggJeAgEAAoGBAMrvgu6ZTTefNN7j\njiZbS/xvQjyXjYMN7oVXv76jbX8gjMOmg9m0xoVZZFAE4XyQDuCm47VRx5Qrf/YL\nXmB2VtCFvB0AhXr5zSeWzPwaAPrjA4ebG+LUo24ziS8KqNxrFs1MmNrQUgZyQC6X\nIe1JHXc9t+JlL5UZyZQC1IfaJulDAgMBAAECgYEAnxOjEj/vrLNLMZE1Q9H7PZVF\nWdP/JQVNvQ7tCpZ3ZdjxHwkvf//aQnuxS5yX2Rnf37BS/TZu+TIkK4373CfHomSx\nUTAn2FsLmOJljupgGcoeLx5K5nu7B7rY5L1NHvdpxZ4YjeISrRtEPvRakllENU5y\ngJE8c2eQOx08ZSRE4TkCQQD7dws2/FldqwdjJucYijsJVuUdoTqxP8gWL6bB251q\nelP2/a6W2elqOcWId28560jG9ZS3cuKvnmu/4LG88vZFAkEAzphrH3673oTsHN+d\nuBd5uyrlnGjWjuiMKv2TPITZcWBjB8nJDSvLneHF59MYwejNNEof2tRjgFSdImFH\nmi995wJBAMtPjW6wiqRz0i41VuT9ZgwACJBzOdvzQJfHgSD9qgFb1CU/J/hpSRIM\nkYvrXK9MbvQFvG6x4VuyT1W8mpe1LK0CQAo8VPpffhFdRpF7psXLK/XQ/0VLkG3O\nKburipLyBg/u9ZkaL0Ley5zL5dFBjTV2Qkx367Ic2b0u9AYTCcgi2DsCQQD3zZ7B\nv7BOm7MkylKokY2MduFFXU0Bxg6pfZ7q3rvg8gqhUFbaMStPRYg6myiDiW/JfLhF\nTcFT4touIo7oriFJ\n-----END PRIVATE KEY-----",
+ "caCertificate": "-----BEGIN CERTIFICATE-----\nMIIEFzCCAv+gAwIBAgIJALK1iUpF2VQLMA0GCSqGSIb3DQEBBQUAMIGhMQswCQYD\nVQQGEwJVUzELMAkGA1UECAwCU0MxFTATBgNVBAcMDERlZmF1bHQgQ2l0eTEcMBoG\nA1UECgwTRGVmYXVsdCBDb21wYW55IEx0ZDEQMA4GA1UECwwHVGVzdCBDQTEaMBgG\nA1UEAwwRd3d3LmV4YW1wbGVjYS5jb20xIjAgBgkqhkiG9w0BCQEWE2V4YW1wbGVA\nZXhhbXBsZS5jb20wHhcNMTUwMTEyMTQxNTAxWhcNMjUwMTA5MTQxNTAxWjCBoTEL\nMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlNDMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkx\nHDAaBgNVBAoME0RlZmF1bHQgQ29tcGFueSBMdGQxEDAOBgNVBAsMB1Rlc3QgQ0Ex\nGjAYBgNVBAMMEXd3dy5leGFtcGxlY2EuY29tMSIwIAYJKoZIhvcNAQkBFhNleGFt\ncGxlQGV4YW1wbGUuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA\nw2rK1J2NMtQj0KDug7g7HRKl5jbf0QMkMKyTU1fBtZ0cCzvsF4CqV11LK4BSVWaK\nrzkaXe99IVJnH8KdOlDl5Dh/+cJ3xdkClSyeUT4zgb6CCBqg78ePp+nN11JKuJlV\nIG1qdJpB1J5O/kCLsGcTf7RS74MtqMFo96446Zvt7YaBhWPz6gDaO/TUzfrNcGLA\nEfHVXkvVWqb3gqXUztZyVex/gtP9FXQ7gxTvJml7UkmT0VAFjtZnCqmFxpLZFZ15\n+qP9O7Q2MpsGUO/4vDAuYrKBeg1ZdPSi8gwqUP2qWsGd9MIWRv3thI2903BczDc7\nr8WaIbm37vYZAS9G56E4+wIDAQABo1AwTjAdBgNVHQ4EFgQUugLrSJshOBk5TSsU\nANs4+SmJUGwwHwYDVR0jBBgwFoAUugLrSJshOBk5TSsUANs4+SmJUGwwDAYDVR0T\nBAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEAaMJ33zAMV4korHo5aPfayV3uHoYZ\n1ChzP3eSsF+FjoscpoNSKs91ZXZF6LquzoNezbfiihK4PYqgwVD2+O0/Ty7UjN4S\nqzFKVR4OS/6lCJ8YncxoFpTntbvjgojf1DEataKFUN196PAANc3yz8cWHF4uvjPv\nWkgFqbIjb+7D1YgglNyovXkRDlRZl0LD1OQ0ZWhd4Ge1qx8mmmanoBeYZ9+DgpFC\nj9tQAbS867yeOryNe7sEOIpXAAqK/DTu0hB6+ySsDfMo4piXCc2aA/eI2DCuw08e\nw17Dz9WnupZjVdwTKzDhFgJZMLDqn37HQnT6EemLFqbcR0VPEnfyhDtZIQ==\n-----END CERTIFICATE-----"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${JENKINS_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate",
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${JENKINS_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "name": "${JENKINS_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "jenkins",
+ "image": "${JENKINS_IMAGE}",
+ "env": [
+ {
+ "name": "JENKINS_PASSWORD",
+ "value": "${JENKINS_PASSWORD}"
+ }
+ ],
+ "resources": {},
+ "volumeMounts": [
+ {
+ "name": "${JENKINS_SERVICE_NAME}-data",
+ "mountPath": "/var/lib/jenkins"
+ }
+ ],
+ "terminationMessagePath": "/dev/termination-log",
+ "imagePullPolicy": "IfNotPresent",
+ "capabilities": {},
+ "securityContext": {
+ "capabilities": {},
+ "privileged": false
+ }
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${JENKINS_SERVICE_NAME}-data",
+ "emptyDir": {
+ "medium": ""
+ }
+ }
+ ],
+ "restartPolicy": "Always",
+ "dnsPolicy": "ClusterFirst"
+ }
+ }
+ }
+ }
+ ],
+ "parameters": [
+ {
+ "name": "JENKINS_SERVICE_NAME",
+ "description": "Jenkins service name",
+ "value": "jenkins"
+ },
+ {
+ "name": "JENKINS_IMAGE",
+ "description": "Jenkins Docker image to use",
+ "value": "openshift/jenkins-1-centos7"
+ },
+ {
+ "name": "JENKINS_PASSWORD",
+ "description": "Password for the Jenkins user",
+ "generate": "expression",
+ "value": "password"
+ }
+ ],
+ "labels": {
+ "template": "jenkins-ephemeral-template"
+ }
+}
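Everything in the template objects above that looks like ${JENKINS_SERVICE_NAME} or ${JENKINS_PASSWORD} is resolved when the template is processed, using the defaults declared under "parameters" unless the caller overrides them. The sketch below is a simplified, hypothetical illustration of that substitution step in Python, not the actual OpenShift template processor; the local file path and the override value are assumptions, and parameters with "generate": "expression" are not handled here.

    import json

    def process_template(path, overrides=None):
        """Tiny stand-in for template processing: apply parameter defaults
        plus caller overrides to every ${NAME} placeholder in "objects"."""
        with open(path) as f:
            template = json.load(f)
        params = {p["name"]: p.get("value", "") for p in template.get("parameters", [])}
        params.update(overrides or {})
        rendered = json.dumps(template["objects"])
        for name, value in params.items():
            rendered = rendered.replace("${%s}" % name, value)
        return json.loads(rendered)

    # e.g. override the default Jenkins password before creating the objects
    objects = process_template("jenkins-ephemeral-template.json",
                               {"JENKINS_PASSWORD": "s3cret"})
    print([obj["kind"] for obj in objects])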
diff --git a/roles/openshift_examples/files/examples/v1.1/quickstart-templates/jenkins-persistent-template.json b/roles/openshift_examples/files/examples/v1.1/quickstart-templates/jenkins-persistent-template.json
new file mode 100644
index 000000000..fa31de486
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.1/quickstart-templates/jenkins-persistent-template.json
@@ -0,0 +1,173 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jenkins-persistent",
+ "creationTimestamp": null,
+ "annotations": {
+ "description": "Jenkins service, with persistent storage.",
+ "iconClass": "icon-jenkins",
+ "tags": "database,jenkins"
+ }
+ },
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${JENKINS_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "web",
+ "protocol": "TCP",
+ "port": 8080,
+ "targetPort": 8080,
+ "nodePort": 0
+ }
+ ],
+ "selector": {
+ "name": "${JENKINS_SERVICE_NAME}"
+ },
+ "portalIP": "",
+ "type": "ClusterIP",
+ "sessionAffinity": "None"
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jenkins",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "to": {
+ "kind": "Service",
+ "name": "${JENKINS_SERVICE_NAME}"
+ },
+ "tls": {
+ "termination": "edge",
+ "certificate": "-----BEGIN CERTIFICATE-----\nMIIDIjCCAgqgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBoTELMAkGA1UEBhMCVVMx\nCzAJBgNVBAgMAlNDMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0Rl\nZmF1bHQgQ29tcGFueSBMdGQxEDAOBgNVBAsMB1Rlc3QgQ0ExGjAYBgNVBAMMEXd3\ndy5leGFtcGxlY2EuY29tMSIwIAYJKoZIhvcNAQkBFhNleGFtcGxlQGV4YW1wbGUu\nY29tMB4XDTE1MDExMjE0MTk0MVoXDTE2MDExMjE0MTk0MVowfDEYMBYGA1UEAwwP\nd3d3LmV4YW1wbGUuY29tMQswCQYDVQQIDAJTQzELMAkGA1UEBhMCVVMxIjAgBgkq\nhkiG9w0BCQEWE2V4YW1wbGVAZXhhbXBsZS5jb20xEDAOBgNVBAoMB0V4YW1wbGUx\nEDAOBgNVBAsMB0V4YW1wbGUwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMrv\ngu6ZTTefNN7jjiZbS/xvQjyXjYMN7oVXv76jbX8gjMOmg9m0xoVZZFAE4XyQDuCm\n47VRx5Qrf/YLXmB2VtCFvB0AhXr5zSeWzPwaAPrjA4ebG+LUo24ziS8KqNxrFs1M\nmNrQUgZyQC6XIe1JHXc9t+JlL5UZyZQC1IfaJulDAgMBAAGjDTALMAkGA1UdEwQC\nMAAwDQYJKoZIhvcNAQEFBQADggEBAFCi7ZlkMnESvzlZCvv82Pq6S46AAOTPXdFd\nTMvrh12E1sdVALF1P1oYFJzG1EiZ5ezOx88fEDTW+Lxb9anw5/KJzwtWcfsupf1m\nV7J0D3qKzw5C1wjzYHh9/Pz7B1D0KthQRATQCfNf8s6bbFLaw/dmiIUhHLtIH5Qc\nyfrejTZbOSP77z8NOWir+BWWgIDDB2//3AkDIQvT20vmkZRhkqSdT7et4NmXOX/j\njhPti4b2Fie0LeuvgaOdKjCpQQNrYthZHXeVlOLRhMTSk3qUczenkKTOhvP7IS9q\n+Dzv5hqgSfvMG392KWh5f8xXfJNs4W5KLbZyl901MeReiLrPH3w=\n-----END CERTIFICATE-----",
+ "key": "-----BEGIN PRIVATE KEY-----\nMIICeAIBADANBgkqhkiG9w0BAQEFAASCAmIwggJeAgEAAoGBAMrvgu6ZTTefNN7j\njiZbS/xvQjyXjYMN7oVXv76jbX8gjMOmg9m0xoVZZFAE4XyQDuCm47VRx5Qrf/YL\nXmB2VtCFvB0AhXr5zSeWzPwaAPrjA4ebG+LUo24ziS8KqNxrFs1MmNrQUgZyQC6X\nIe1JHXc9t+JlL5UZyZQC1IfaJulDAgMBAAECgYEAnxOjEj/vrLNLMZE1Q9H7PZVF\nWdP/JQVNvQ7tCpZ3ZdjxHwkvf//aQnuxS5yX2Rnf37BS/TZu+TIkK4373CfHomSx\nUTAn2FsLmOJljupgGcoeLx5K5nu7B7rY5L1NHvdpxZ4YjeISrRtEPvRakllENU5y\ngJE8c2eQOx08ZSRE4TkCQQD7dws2/FldqwdjJucYijsJVuUdoTqxP8gWL6bB251q\nelP2/a6W2elqOcWId28560jG9ZS3cuKvnmu/4LG88vZFAkEAzphrH3673oTsHN+d\nuBd5uyrlnGjWjuiMKv2TPITZcWBjB8nJDSvLneHF59MYwejNNEof2tRjgFSdImFH\nmi995wJBAMtPjW6wiqRz0i41VuT9ZgwACJBzOdvzQJfHgSD9qgFb1CU/J/hpSRIM\nkYvrXK9MbvQFvG6x4VuyT1W8mpe1LK0CQAo8VPpffhFdRpF7psXLK/XQ/0VLkG3O\nKburipLyBg/u9ZkaL0Ley5zL5dFBjTV2Qkx367Ic2b0u9AYTCcgi2DsCQQD3zZ7B\nv7BOm7MkylKokY2MduFFXU0Bxg6pfZ7q3rvg8gqhUFbaMStPRYg6myiDiW/JfLhF\nTcFT4touIo7oriFJ\n-----END PRIVATE KEY-----",
+ "caCertificate": "-----BEGIN CERTIFICATE-----\nMIIEFzCCAv+gAwIBAgIJALK1iUpF2VQLMA0GCSqGSIb3DQEBBQUAMIGhMQswCQYD\nVQQGEwJVUzELMAkGA1UECAwCU0MxFTATBgNVBAcMDERlZmF1bHQgQ2l0eTEcMBoG\nA1UECgwTRGVmYXVsdCBDb21wYW55IEx0ZDEQMA4GA1UECwwHVGVzdCBDQTEaMBgG\nA1UEAwwRd3d3LmV4YW1wbGVjYS5jb20xIjAgBgkqhkiG9w0BCQEWE2V4YW1wbGVA\nZXhhbXBsZS5jb20wHhcNMTUwMTEyMTQxNTAxWhcNMjUwMTA5MTQxNTAxWjCBoTEL\nMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlNDMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkx\nHDAaBgNVBAoME0RlZmF1bHQgQ29tcGFueSBMdGQxEDAOBgNVBAsMB1Rlc3QgQ0Ex\nGjAYBgNVBAMMEXd3dy5leGFtcGxlY2EuY29tMSIwIAYJKoZIhvcNAQkBFhNleGFt\ncGxlQGV4YW1wbGUuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA\nw2rK1J2NMtQj0KDug7g7HRKl5jbf0QMkMKyTU1fBtZ0cCzvsF4CqV11LK4BSVWaK\nrzkaXe99IVJnH8KdOlDl5Dh/+cJ3xdkClSyeUT4zgb6CCBqg78ePp+nN11JKuJlV\nIG1qdJpB1J5O/kCLsGcTf7RS74MtqMFo96446Zvt7YaBhWPz6gDaO/TUzfrNcGLA\nEfHVXkvVWqb3gqXUztZyVex/gtP9FXQ7gxTvJml7UkmT0VAFjtZnCqmFxpLZFZ15\n+qP9O7Q2MpsGUO/4vDAuYrKBeg1ZdPSi8gwqUP2qWsGd9MIWRv3thI2903BczDc7\nr8WaIbm37vYZAS9G56E4+wIDAQABo1AwTjAdBgNVHQ4EFgQUugLrSJshOBk5TSsU\nANs4+SmJUGwwHwYDVR0jBBgwFoAUugLrSJshOBk5TSsUANs4+SmJUGwwDAYDVR0T\nBAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEAaMJ33zAMV4korHo5aPfayV3uHoYZ\n1ChzP3eSsF+FjoscpoNSKs91ZXZF6LquzoNezbfiihK4PYqgwVD2+O0/Ty7UjN4S\nqzFKVR4OS/6lCJ8YncxoFpTntbvjgojf1DEataKFUN196PAANc3yz8cWHF4uvjPv\nWkgFqbIjb+7D1YgglNyovXkRDlRZl0LD1OQ0ZWhd4Ge1qx8mmmanoBeYZ9+DgpFC\nj9tQAbS867yeOryNe7sEOIpXAAqK/DTu0hB6+ySsDfMo4piXCc2aA/eI2DCuw08e\nw17Dz9WnupZjVdwTKzDhFgJZMLDqn37HQnT6EemLFqbcR0VPEnfyhDtZIQ==\n-----END CERTIFICATE-----"
+ }
+ }
+ },
+ {
+ "kind": "PersistentVolumeClaim",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${JENKINS_SERVICE_NAME}"
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${JENKINS_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate",
+ "resources": {}
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${JENKINS_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "name": "${JENKINS_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "jenkins",
+ "image": "${JENKINS_IMAGE}",
+ "env": [
+ {
+ "name": "JENKINS_PASSWORD",
+ "value": "${JENKINS_PASSWORD}"
+ }
+ ],
+ "resources": {},
+ "volumeMounts": [
+ {
+ "name": "${JENKINS_SERVICE_NAME}-data",
+ "mountPath": "/var/lib/jenkins"
+ }
+ ],
+ "terminationMessagePath": "/dev/termination-log",
+ "imagePullPolicy": "IfNotPresent",
+ "capabilities": {},
+ "securityContext": {
+ "capabilities": {},
+ "privileged": false
+ }
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${JENKINS_SERVICE_NAME}-data",
+ "persistentVolumeClaim": {
+ "claimName": "${JENKINS_SERVICE_NAME}"
+ }
+ }
+ ],
+ "restartPolicy": "Always",
+ "dnsPolicy": "ClusterFirst"
+ }
+ }
+ }
+ }
+ ],
+ "parameters": [
+ {
+ "name": "JENKINS_SERVICE_NAME",
+ "description": "Jenkins service name",
+ "value": "jenkins"
+ },
+ {
+ "name": "JENKINS_PASSWORD",
+ "description": "Password for the Jenkins user",
+ "generate": "expression",
+ "value": "password"
+ },
+ {
+ "name": "JENKINS_IMAGE",
+ "description": "Jenkins Docker image to use",
+ "value": "openshift/jenkins-1-centos7"
+ },
+ {
+ "name": "VOLUME_CAPACITY",
+ "description": "Volume space available for data, e.g. 512Mi, 2Gi",
+ "value": "512Mi",
+ "required": true
+ }
+ ],
+ "labels": {
+ "template": "jenkins-persistent-template"
+ }
+}
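The persistent variant differs from the ephemeral one only in how the ${JENKINS_SERVICE_NAME}-data volume is backed: a PersistentVolumeClaim sized by VOLUME_CAPACITY instead of an emptyDir. Values such as 512Mi or 2Gi use Kubernetes resource-quantity suffixes; the helper below is a rough sketch for turning the common suffixes into bytes when sanity-checking a requested size, and it deliberately does not implement the full quantity grammar.

    # Rough conversion of VOLUME_CAPACITY-style quantities ("512Mi", "2Gi") to
    # bytes. Only the common suffixes are handled (no exponent forms, no "m").
    SUFFIXES = {
        "Ki": 1024, "Mi": 1024 ** 2, "Gi": 1024 ** 3, "Ti": 1024 ** 4,
        "k": 1000, "M": 1000 ** 2, "G": 1000 ** 3, "T": 1000 ** 4,
    }

    def quantity_to_bytes(quantity):
        # Try the longest suffixes first so "Mi" is not mistaken for "M".
        for suffix in sorted(SUFFIXES, key=len, reverse=True):
            if quantity.endswith(suffix):
                return int(float(quantity[:-len(suffix)]) * SUFFIXES[suffix])
        return int(quantity)  # a bare number is already bytes

    assert quantity_to_bytes("512Mi") == 512 * 1024 ** 2
    assert quantity_to_bytes("2Gi") == 2 * 1024 ** 3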
diff --git a/roles/openshift_examples/files/examples/v1.1/quickstart-templates/nodejs-mongodb.json b/roles/openshift_examples/files/examples/v1.1/quickstart-templates/nodejs-mongodb.json
new file mode 100644
index 000000000..8760b074c
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.1/quickstart-templates/nodejs-mongodb.json
@@ -0,0 +1,346 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "nodejs-mongodb-example",
+ "annotations": {
+ "description": "An example Node.js application with a MongoDB database",
+ "tags": "instant-app,nodejs,mongodb",
+ "iconClass": "icon-nodejs"
+ }
+ },
+ "labels": {
+ "template": "nodejs-mongodb-example"
+ },
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "nodejs-mongodb-example",
+ "annotations": {
+ "description": "Exposes and load balances the application pods"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "web",
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "name": "nodejs-mongodb-example"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "nodejs-mongodb-example"
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "kind": "Service",
+ "name": "nodejs-mongodb-example"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "nodejs-mongodb-example",
+ "annotations": {
+ "description": "Keeps track of changes in the application image"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "nodejs-mongodb-example",
+ "annotations": {
+ "description": "Defines how to build the application"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "openshift",
+ "name": "nodejs:0.10"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "nodejs-mongodb-example:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange"
+ },
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "nodejs-mongodb-example",
+ "annotations": {
+ "description": "Defines how to deploy the application server"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Rolling"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "nodejs-mongodb-example"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "nodejs-mongodb-example:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "nodejs-mongodb-example"
+ },
+ "template": {
+ "metadata": {
+ "name": "nodejs-mongodb-example",
+ "labels": {
+ "name": "nodejs-mongodb-example"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "nodejs-mongodb-example",
+ "image": "nodejs-mongodb-example",
+ "ports": [
+ {
+ "containerPort": 8080
+ }
+ ],
+ "env": [
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "value": "${DATABASE_SERVICE_NAME}"
+ },
+ {
+ "name": "MONGODB_USER",
+ "value": "${DATABASE_USER}"
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "value": "${DATABASE_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "value": "${DATABASE_NAME}"
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "value": "${DATABASE_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "description": "Exposes the database server"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "mongodb",
+ "port": 27017,
+ "targetPort": 27017
+ }
+ ],
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "description": "Defines how to deploy the database"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "labels": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "mongodb",
+ "image": "${MONGODB_IMAGE}",
+ "ports": [
+ {
+ "containerPort": 27017
+ }
+ ],
+ "env": [
+ {
+ "name": "MONGODB_USER",
+ "value": "${DATABASE_USER}"
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "value": "${DATABASE_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "value": "${DATABASE_NAME}"
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "value": "${DATABASE_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ],
+ "parameters": [
+ {
+ "name": "SOURCE_REPOSITORY_URL",
+ "description": "The URL of the repository with your application source code",
+ "value": "https://github.com/openshift/nodejs-ex.git"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_REF",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch"
+ },
+ {
+ "name": "CONTEXT_DIR",
+ "description": "Set this to the relative path to your project if it is not in the root of your repository"
+ },
+ {
+ "name": "APPLICATION_DOMAIN",
+ "description": "The exposed hostname that will route to the Node.js service, if left blank a value will be defaulted.",
+ "value": ""
+ },
+ {
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "description": "A secret string used to configure the GitHub webhook",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "description": "A secret string used to configure the Generic webhook",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "description": "Database service name",
+ "value": "mongodb"
+ },
+ {
+ "name": "DATABASE_USER",
+ "description": "Username for MongoDB user that will be used for accessing the database",
+ "generate": "expression",
+ "from": "user[A-Z0-9]{3}"
+ },
+ {
+ "name": "DATABASE_PASSWORD",
+ "description": "Password for the MongoDB user",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}"
+ },
+ {
+ "name": "DATABASE_NAME",
+ "description": "Database name",
+ "value": "sampledb"
+ },
+ {
+ "name": "DATABASE_ADMIN_PASSWORD",
+ "description": "Password for the database admin user",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{16}"
+ },
+ {
+ "name": "MONGODB_IMAGE",
+ "description": "Image to use for mongodb",
+ "value": "openshift/mongodb-24-centos7"
+ }
+ ]
+}
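Several parameters above (GITHUB_WEBHOOK_SECRET, DATABASE_USER, DATABASE_PASSWORD, ...) carry "generate": "expression" with a "from" pattern instead of a fixed value, so a value matching the pattern is invented at creation time. The snippet below approximates that behaviour for the simple patterns used here (a literal prefix plus [character-class]{count} groups); the real generator supports a richer syntax, so treat this only as an illustration.

    import re
    import secrets
    import string

    CLASS_RANGES = {"a-z": string.ascii_lowercase,
                    "A-Z": string.ascii_uppercase,
                    "0-9": string.digits}

    def expand_class(body):
        # "a-zA-Z0-9" -> the union of the listed ranges
        return "".join(CLASS_RANGES.get(rng, "") for rng in re.findall(r".-.", body))

    def generate(expression):
        out, pos = "", 0
        for m in re.finditer(r"\[([^\]]+)\]\{(\d+)\}", expression):
            out += expression[pos:m.start()]            # literal part, e.g. "user"
            alphabet = expand_class(m.group(1))
            out += "".join(secrets.choice(alphabet) for _ in range(int(m.group(2))))
            pos = m.end()
        return out + expression[pos:]

    print(generate("user[A-Z0-9]{3}"))      # e.g. "userQ7K"
    print(generate("[a-zA-Z0-9]{16}"))      # a random 16-character password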
diff --git a/roles/openshift_examples/files/examples/v1.1/quickstart-templates/nodejs.json b/roles/openshift_examples/files/examples/v1.1/quickstart-templates/nodejs.json
new file mode 100644
index 000000000..e047266e3
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.1/quickstart-templates/nodejs.json
@@ -0,0 +1,248 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "nodejs-example",
+ "annotations": {
+ "description": "An example Node.js application with no database",
+ "tags": "instant-app,nodejs",
+ "iconClass": "icon-nodejs"
+ }
+ },
+ "labels": {
+ "template": "nodejs-example"
+ },
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "nodejs-example",
+ "annotations": {
+ "description": "Exposes and load balances the application pods"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "web",
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "name": "nodejs-example"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "nodejs-example"
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "kind": "Service",
+ "name": "nodejs-example"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "nodejs-example",
+ "annotations": {
+ "description": "Keeps track of changes in the application image"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "nodejs-example",
+ "annotations": {
+ "description": "Defines how to build the application"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "openshift",
+ "name": "nodejs:0.10"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "nodejs-example:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange"
+ },
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "nodejs-example",
+ "annotations": {
+ "description": "Defines how to deploy the application server"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Rolling"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "nodejs-example"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "nodejs-example:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "nodejs-example"
+ },
+ "template": {
+ "metadata": {
+ "name": "nodejs-example",
+ "labels": {
+ "name": "nodejs-example"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "nodejs-example",
+ "image": "nodejs-example",
+ "ports": [
+ {
+ "containerPort": 8080
+ }
+ ],
+ "env": [
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "value": "${DATABASE_SERVICE_NAME}"
+ },
+ {
+ "name": "MONGODB_USER",
+ "value": "${MONGODB_USER}"
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "value": "${MONGODB_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "value": "${MONGODB_DATABASE}"
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "value": "${MONGODB_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ],
+ "parameters": [
+ {
+ "name": "SOURCE_REPOSITORY_URL",
+ "description": "The URL of the repository with your application source code",
+ "value": "https://github.com/openshift/nodejs-ex.git"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_REF",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch"
+ },
+ {
+ "name": "CONTEXT_DIR",
+ "description": "Set this to the relative path to your project if it is not in the root of your repository"
+ },
+ {
+ "name": "APPLICATION_DOMAIN",
+ "description": "The exposed hostname that will route to the Node.js service, if left blank a value will be defaulted.",
+ "value": ""
+ },
+ {
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "description": "A secret string used to configure the GitHub webhook",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "description": "A secret string used to configure the Generic webhook",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "description": "Database service name"
+ },
+ {
+ "name": "MONGODB_USER",
+ "description": "Username for MongoDB user that will be used for accessing the database"
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "description": "Password for the MongoDB user"
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "description": "Database name"
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "description": "Password for the database admin user"
+ }
+ ]
+}
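The BuildConfig above accepts GitHub and Generic webhook triggers, each guarded by a generated secret that is embedded in the webhook URL. The snippet below is a hypothetical way to fire the Generic webhook from a script; the master URL, project name, and URL layout are assumptions for this OpenShift version, so confirm the exact URL with `oc describe bc nodejs-example` before relying on it.

    import requests

    MASTER = "https://openshift.example.com:8443"   # assumed master API URL
    PROJECT = "myproject"                           # assumed project/namespace
    SECRET = "<generated GENERIC_WEBHOOK_SECRET>"   # from the processed template

    url = ("%s/oapi/v1/namespaces/%s/buildconfigs/nodejs-example/webhooks/%s/generic"
           % (MASTER, PROJECT, SECRET))
    response = requests.post(url, verify=False)     # cluster CA is often self-signed
    print(response.status_code)                     # expect 2xx if the secret matches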
diff --git a/roles/openshift_examples/files/examples/v1.1/quickstart-templates/rails-postgresql.json b/roles/openshift_examples/files/examples/v1.1/quickstart-templates/rails-postgresql.json
new file mode 100644
index 000000000..b98282528
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.1/quickstart-templates/rails-postgresql.json
@@ -0,0 +1,402 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "rails-postgresql-example",
+ "annotations": {
+ "description": "An example Rails application with a PostgreSQL database",
+ "tags": "instant-app,ruby,rails,postgresql",
+ "iconClass": "icon-ruby"
+ }
+ },
+ "labels": {
+ "template": "rails-postgresql-example"
+ },
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "rails-postgresql-example",
+ "annotations": {
+ "description": "Exposes and load balances the application pods"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "web",
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "name": "rails-postgresql-example"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "rails-postgresql-example"
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "kind": "Service",
+ "name": "rails-postgresql-example"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "rails-postgresql-example",
+ "annotations": {
+ "description": "Keeps track of changes in the application image"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "rails-postgresql-example",
+ "annotations": {
+ "description": "Defines how to build the application"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "openshift",
+ "name": "ruby:2.0"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "rails-postgresql-example:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange"
+ },
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "rails-postgresql-example",
+ "annotations": {
+ "description": "Defines how to deploy the application server"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate",
+ "recreateParams": {
+ "pre": {
+ "failurePolicy": "Abort",
+ "execNewPod": {
+ "command": [
+ "./migrate-database.sh"
+ ],
+ "containerName": "rails-postgresql-example"
+ }
+ }
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "rails-postgresql-example"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "rails-postgresql-example:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "rails-postgresql-example"
+ },
+ "template": {
+ "metadata": {
+ "name": "rails-postgresql-example",
+ "labels": {
+ "name": "rails-postgresql-example"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "rails-postgresql-example",
+ "image": "rails-postgresql-example",
+ "ports": [
+ {
+ "containerPort": 8080
+ }
+ ],
+ "env": [
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "value": "${DATABASE_SERVICE_NAME}"
+ },
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DATABASE_USER}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DATABASE_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DATABASE_NAME}"
+ },
+ {
+ "name": "SECRET_KEY_BASE",
+ "value": "${SECRET_KEY_BASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ },
+ {
+ "name": "APPLICATION_DOMAIN",
+ "value": "${APPLICATION_DOMAIN}"
+ },
+ {
+ "name": "APPLICATION_USER",
+ "value": "${APPLICATION_USER}"
+ },
+ {
+ "name": "APPLICATION_PASSWORD",
+ "value": "${APPLICATION_PASSWORD}"
+ },
+ {
+ "name": "RAILS_ENV",
+ "value": "${RAILS_ENV}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "description": "Exposes the database server"
+ }
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "postgresql",
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "annotations": {
+ "description": "Defines how to deploy the database"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${DATABASE_SERVICE_NAME}",
+ "labels": {
+ "name": "${DATABASE_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "postgresql",
+ "image": "${POSTGRESQL_IMAGE}",
+ "ports": [
+ {
+ "containerPort": 5432
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DATABASE_USER}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DATABASE_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DATABASE_NAME}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ],
+ "parameters": [
+ {
+ "name": "SOURCE_REPOSITORY_URL",
+ "description": "The URL of the repository with your application source code",
+ "value": "https://github.com/openshift/rails-ex.git"
+ },
+ {
+ "name": "SOURCE_REPOSITORY_REF",
+ "description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch"
+ },
+ {
+ "name": "CONTEXT_DIR",
+ "description": "Set this to the relative path to your project if it is not in the root of your repository"
+ },
+ {
+ "name": "APPLICATION_DOMAIN",
+ "description": "The exposed hostname that will route to the Rails service, if left blank a value will be defaulted.",
+ "value": ""
+ },
+ {
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "description": "A secret string used to configure the GitHub webhook",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{40}"
+ },
+ {
+ "name": "SECRET_KEY_BASE",
+ "description": "Your secret key for verifying the integrity of signed cookies",
+ "generate": "expression",
+ "from": "[a-z0-9]{127}"
+ },
+ {
+ "name": "APPLICATION_USER",
+ "description": "The application user that is used within the sample application to authorize access on pages",
+ "value": "openshift"
+ },
+ {
+ "name": "APPLICATION_PASSWORD",
+ "description": "The application password that is used within the sample application to authorize access on pages",
+ "value": "secret"
+ },
+ {
+ "name": "RAILS_ENV",
+ "description": "Environment under which the sample application will run. Could be set to production, development or test",
+ "value": "production"
+ },
+ {
+ "name": "DATABASE_SERVICE_NAME",
+ "description": "Database service name",
+ "value": "postgresql"
+ },
+ {
+ "name": "DATABASE_USER",
+ "description": "database username",
+ "generate": "expression",
+ "from": "user[A-Z0-9]{3}"
+ },
+ {
+ "name": "DATABASE_PASSWORD",
+ "description": "database password",
+ "generate": "expression",
+ "from": "[a-zA-Z0-9]{8}"
+ },
+ {
+ "name": "DATABASE_NAME",
+ "description": "database name",
+ "value": "root"
+ },
+ {
+ "name": "POSTGRESQL_IMAGE",
+ "description": "Image to use for postgresql",
+ "value": "openshift/postgresql-92-centos7"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "description": "database max connections",
+ "value": "10"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "description": "database shared buffers",
+ "value": "12MB"
+ }
+ ]
+}
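These quickstart templates are maintained by hand, so a simple consistency check helps catch drift between the ${NAME} references in "objects" and the declared "parameters" (for instance when a parameter is renamed in one place only). A minimal sketch, assuming the file has been saved locally as rails-postgresql.json:

    import json
    import re

    with open("rails-postgresql.json") as f:
        template = json.load(f)

    declared = {p["name"] for p in template.get("parameters", [])}
    referenced = set(re.findall(r"\$\{([A-Za-z0-9_]+)\}", json.dumps(template["objects"])))

    print("referenced but not declared:", sorted(referenced - declared))  # expect []
    print("declared but never referenced:", sorted(declared - referenced))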
diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-streams/jboss-image-streams.json b/roles/openshift_examples/files/examples/v1.1/xpaas-streams/jboss-image-streams.json
new file mode 100644
index 000000000..aaf5569ae
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.1/xpaas-streams/jboss-image-streams.json
@@ -0,0 +1,107 @@
+{
+ "kind": "List",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-image-streams",
+ "annotations": {
+ "description": "ImageStream definitions for JBoss Middleware products."
+ }
+ },
+ "items": [
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-webserver30-tomcat7-openshift"
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-webserver-3/webserver30-tomcat7-openshift",
+ "tags": [
+ {
+ "name": "1.1",
+ "annotations": {
+ "description": "JBoss Web Server 3.0 Tomcat 7 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,tomcat,tomcat7,java,jboss,xpaas",
+ "supports":"tomcat7:3.0,tomcat:7,java:8,xpaas:1.1",
+ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "sampleContextDir": "tomcat-websocket-chat",
+ "version": "1.1"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-webserver30-tomcat8-openshift"
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-webserver-3/webserver30-tomcat8-openshift",
+ "tags": [
+ {
+ "name": "1.1",
+ "annotations": {
+ "description": "JBoss Web Server 3.0 Tomcat 8 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,tomcat,tomcat8,java,jboss,xpaas",
+ "supports":"tomcat8:3.0,tomcat:8,java:8,xpaas:1.1",
+ "sampleRepo": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "sampleContextDir": "tomcat-websocket-chat",
+ "version": "1.1"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-eap64-openshift"
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-eap-6/eap64-openshift",
+ "tags": [
+ {
+ "name": "1.1",
+ "annotations": {
+ "description": "JBoss EAP 6.4 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,eap,javaee,java,jboss,xpaas",
+ "supports":"eap:6.4,javaee:6,java:8,xpaas:1.1",
+ "sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
+ "sampleContextDir": "kitchensink",
+ "sampleRef": "6.4.x",
+ "version": "1.1"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jboss-amq-62"
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-amq-6/amq62-openshift",
+ "tags": [
+ {
+ "name": "1.1",
+ "annotations": {
+ "description": "JBoss A-MQ 6.2 broker image.",
+ "iconClass": "icon-jboss",
+ "tags": "messaging,amq,jboss,xpaas",
+ "supports":"amq:6.2,messaging,xpaas:1.1",
+ "version": "1.1"
+ }
+ }
+ ]
+ }
+ }
+ ]
+}
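Registering this List (typically in the openshift namespace) is what makes the JBoss builder and broker images available to the xPaaS templates that follow. A short sketch, assuming the file is saved locally, that summarises which repositories and tags the cluster will import:

    import json

    with open("jboss-image-streams.json") as f:
        stream_list = json.load(f)

    for item in stream_list["items"]:
        name = item["metadata"]["name"]
        spec = item["spec"]
        tags = ", ".join(tag["name"] for tag in spec.get("tags", []))
        print("%s -> %s (tags: %s)" % (name, spec["dockerImageRepository"], tags))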
diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/amq62-basic.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/amq62-basic.json
new file mode 100644
index 000000000..3fd04c28c
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/amq62-basic.json
@@ -0,0 +1,325 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for JBoss A-MQ brokers. These can be deployed as standalone or in a mesh. This template doesn't feature SSL support.",
+ "iconClass": "icon-jboss",
+ "tags": "messaging,amq,jboss,xpaas",
+ "version": "1.1.0"
+ },
+ "name": "amq62-basic"
+ },
+ "labels": {
+ "template": "amq62-basic",
+ "xpaas": "1.1.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "broker",
+ "required": true
+ },
+ {
+ "description": "Protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. If left empty, queues will be still created dynamically.",
+ "name": "MQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. If left empty, topics will be still created dynamically.",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "User name for admin user. If left empty, it will be generated.",
+ "name": "AMQ_ADMIN_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Password for admin user. If left empty, it will be generated.",
+ "name": "AMQ_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5672,
+ "targetPort": 5672
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-amqp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's AMQP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 1883,
+ "targetPort": 1883
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-mqtt",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's MQTT port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61613,
+ "targetPort": 61613
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-stomp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's STOMP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-62:1.1"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-62",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl -s -L -u ${AMQ_ADMIN_USERNAME}:${AMQ_ADMIN_PASSWORD} 'http://localhost:8161/hawtio/jolokia/read/org.apache.activemq:type=Broker,brokerName=*,service=Health/CurrentStatus' | grep -q '\"CurrentStatus\" *: *\"Good\"'"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "AMQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "AMQ_ADMIN_USERNAME",
+ "value": "${AMQ_ADMIN_USERNAME}"
+ },
+ {
+ "name": "AMQ_ADMIN_PASSWORD",
+ "value": "${AMQ_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
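The readiness probe above shells out to curl and greps Jolokia's Broker Health MBean for a "Good" CurrentStatus. The same check can be scripted outside the probe; the sketch below assumes it runs inside the pod (localhost:8161) with the generated admin credentials, and the exact shape of the wildcard-read response is an assumption based on Jolokia's documented format rather than something taken from this template.

    import requests

    JOLOKIA_URL = ("http://localhost:8161/hawtio/jolokia/read/"
                   "org.apache.activemq:type=Broker,brokerName=*,service=Health/CurrentStatus")

    resp = requests.get(JOLOKIA_URL,
                        auth=("<AMQ_ADMIN_USERNAME>", "<AMQ_ADMIN_PASSWORD>"),
                        allow_redirects=True)
    payload = resp.json()
    # With a wildcard brokerName, "value" maps each matching MBean to its attributes.
    brokers = payload.get("value", {}) or {}
    healthy = bool(brokers) and all(
        attrs.get("CurrentStatus") == "Good" for attrs in brokers.values())
    print("broker healthy:", healthy)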
diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/amq62-persistent-ssl.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/amq62-persistent-ssl.json
new file mode 100644
index 000000000..aa9e716cf
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/amq62-persistent-ssl.json
@@ -0,0 +1,521 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for JBoss A-MQ brokers. These are deployed as standalone and use persistent storage for saving messages. This template supports SSL and requires usage of OpenShift secrets.",
+ "iconClass": "icon-jboss",
+ "tags": "messaging,amq,jboss,xpaas",
+ "version": "1.1.0"
+ },
+ "name": "amq62-persistent-ssl"
+ },
+ "labels": {
+ "template": "amq62-persistent-ssl",
+ "xpaas": "1.1.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "broker",
+ "required": true
+ },
+ {
+ "description": "Protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. SSL variants of these protocols will be configured automaticaly.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. If left empty, queues will be still created dynamically.",
+ "name": "MQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. If left empty, topics will be still created dynamically.",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "User name for admin user. If left empty, it will be generated.",
+ "name": "AMQ_ADMIN_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Password for admin user. If left empty, it will be generated.",
+ "name": "AMQ_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Name of a secret containing SSL related files",
+ "name": "AMQ_SECRET",
+ "value": "amq-app-secret",
+ "required": true
+ },
+ {
+ "description": "SSL trust store filename",
+ "name": "AMQ_TRUSTSTORE",
+ "value": "broker.ts",
+ "required": true
+ },
+ {
+ "description": "SSL trust store password",
+ "name": "AMQ_TRUSTSTORE_PASSWORD",
+ "value": "",
+ "required": true
+ },
+ {
+ "description": "SSL key store filename",
+ "name": "AMQ_KEYSTORE",
+ "value": "broker.ks",
+ "required": true
+ },
+ {
+ "description": "Password for accessing SSL keystore",
+ "name": "AMQ_KEYSTORE_PASSWORD",
+ "value": "",
+ "required": true
+ },
+ {
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5672,
+ "targetPort": 5672
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-amqp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's AMQP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5671,
+ "targetPort": 5671
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-amqp-ssl",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's AMQP SSL port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 1883,
+ "targetPort": 1883
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-mqtt",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's MQTT port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8883,
+ "targetPort": 8883
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-mqtt-ssl",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's MQTT SSL port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61613,
+ "targetPort": 61613
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-stomp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's STOMP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61612,
+ "targetPort": 61612
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-stomp-ssl",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's STOMP SSL port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61617,
+ "targetPort": 61617
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp-ssl",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire (SSL) port."
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-62:1.1"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccount": "amq-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-62",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "broker-secret-volume",
+ "mountPath": "/etc/amq-secret-volume",
+ "readOnly": true
+ },
+ {
+ "mountPath": "/opt/amq/data/kahadb",
+ "name": "${APPLICATION_NAME}-amq-pvol"
+ }
+ ],
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl -s -L -u ${AMQ_ADMIN_USERNAME}:${AMQ_ADMIN_PASSWORD} 'http://localhost:8161/hawtio/jolokia/read/org.apache.activemq:type=Broker,brokerName=*,service=Health/CurrentStatus' | grep -q '\"CurrentStatus\" *: *\"Good\"'"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp-ssl",
+ "containerPort": 5671,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt-ssl",
+ "containerPort": 8883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp-ssl",
+ "containerPort": 61612,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp-ssl",
+ "containerPort": 61617,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "AMQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "AMQ_ADMIN_USERNAME",
+ "value": "${AMQ_ADMIN_USERNAME}"
+ },
+ {
+ "name": "AMQ_ADMIN_PASSWORD",
+ "value": "${AMQ_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "AMQ_KEYSTORE_TRUSTSTORE_DIR",
+ "value": "/etc/amq-secret-volume"
+ },
+ {
+ "name": "AMQ_TRUSTSTORE",
+ "value": "${AMQ_TRUSTSTORE}"
+ },
+ {
+ "name": "AMQ_TRUSTSTORE_PASSWORD",
+ "value": "${AMQ_TRUSTSTORE_PASSWORD}"
+ },
+ {
+ "name": "AMQ_KEYSTORE",
+ "value": "${AMQ_KEYSTORE}"
+ },
+ {
+ "name": "AMQ_KEYSTORE_PASSWORD",
+ "value": "${AMQ_KEYSTORE_PASSWORD}"
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "broker-secret-volume",
+ "secret": {
+ "secretName": "${AMQ_SECRET}"
+ }
+ },
+ {
+ "name": "${APPLICATION_NAME}-amq-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-amq-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
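Unlike the non-SSL variants, this template does not create its keystore material: it expects an existing secret (AMQ_SECRET, default amq-app-secret) containing the files named by AMQ_KEYSTORE and AMQ_TRUSTSTORE, mounted at /etc/amq-secret-volume and accessible to the amq-service-account service account. A minimal sketch of building that Secret manifest, assuming broker.ks and broker.ts have already been created elsewhere (for example with keytool):

    import base64
    import json

    def b64_file(path):
        with open(path, "rb") as f:
            return base64.b64encode(f.read()).decode("ascii")

    secret = {
        "kind": "Secret",
        "apiVersion": "v1",
        "metadata": {"name": "amq-app-secret"},
        "data": {
            "broker.ks": b64_file("broker.ks"),   # keystore named by AMQ_KEYSTORE
            "broker.ts": b64_file("broker.ts"),   # truststore named by AMQ_TRUSTSTORE
        },
    }

    with open("amq-app-secret.json", "w") as f:
        json.dump(secret, f, indent=2)
    # Create it with `oc create -f amq-app-secret.json` and make it available to
    # the amq-service-account before instantiating the template.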
diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/amq62-persistent.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/amq62-persistent.json
new file mode 100644
index 000000000..3a2db3ce9
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/amq62-persistent.json
@@ -0,0 +1,343 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for JBoss A-MQ brokers. These can be deployed as standalone and use persistent storage for saving messages. This template doesn't feature SSL support.",
+ "iconClass": "icon-jboss",
+ "tags": "messaging,amq,jboss,xpaas",
+ "version": "1.1.0"
+ },
+ "name": "amq62-persistent"
+ },
+ "labels": {
+ "template": "amq62-persistent",
+ "xpaas": "1.1.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "broker",
+ "required": true
+ },
+ {
+ "description": "Protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. If left empty, queues will be still created dynamically.",
+ "name": "MQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. If left empty, topics will be still created dynamically.",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "User name for admin user. If left empty, it will be generated.",
+ "name": "AMQ_ADMIN_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Password for admin user. If left empty, it will be generated.",
+ "name": "AMQ_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5672,
+ "targetPort": 5672
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-amqp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's AMQP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 1883,
+ "targetPort": 1883
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-mqtt",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's MQTT port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61613,
+ "targetPort": 61613
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-stomp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's STOMP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-62:1.1"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-62",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "mountPath": "/opt/amq/data/kahadb",
+ "name": "${APPLICATION_NAME}-amq-pvol"
+ }
+ ],
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl -s -L -u ${AMQ_ADMIN_USERNAME}:${AMQ_ADMIN_PASSWORD} 'http://localhost:8161/hawtio/jolokia/read/org.apache.activemq:type=Broker,brokerName=*,service=Health/CurrentStatus' | grep -q '\"CurrentStatus\" *: *\"Good\"'"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "AMQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "AMQ_ADMIN_USERNAME",
+ "value": "${AMQ_ADMIN_USERNAME}"
+ },
+ {
+ "name": "AMQ_ADMIN_PASSWORD",
+ "value": "${AMQ_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-amq-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-amq-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
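The persistent broker template above is normally consumed through the oc client: every credential parameter is defaulted or generated, so only the values of interest need to be overridden. A minimal sketch, assuming the JSON has been saved locally as amq62-persistent.json and that the client's parameter flag is -v (early 3.x clients; later releases use -p); the parameter values shown are illustrative:

    # Register the template object, then expand it into concrete API objects in the current project.
    oc create -f amq62-persistent.json
    oc process -f amq62-persistent.json \
        -v APPLICATION_NAME=broker \
        -v VOLUME_CAPACITY=1Gi \
      | oc create -f -
    # Clients reach the broker through the ${APPLICATION_NAME}-amq-tcp service (port 61616, OpenWire).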
diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/amq62-ssl.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/amq62-ssl.json
new file mode 100644
index 000000000..f61fb24c2
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/amq62-ssl.json
@@ -0,0 +1,507 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for JBoss A-MQ brokers. These can be deployed as standalone or in a mesh. This template supports SSL and requires usage of OpenShift secrets.",
+ "iconClass": "icon-jboss",
+ "tags": "messaging,amq,jboss,xpaas",
+ "version": "1.1.0"
+ },
+ "name": "amq62-ssl"
+ },
+ "labels": {
+ "template": "amq62-ssl",
+ "xpaas": "1.1.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "broker",
+ "required": true
+ },
+ {
+ "description": "Protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. SSL variants of these protocols will be configured automaticaly.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. If left empty, queues will be still created dynamically.",
+ "name": "MQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. If left empty, topics will be still created dynamically.",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "User name for admin user. If left empty, it will be generated.",
+ "name": "AMQ_ADMIN_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Password for admin user. If left empty, it will be generated.",
+ "name": "AMQ_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Name of a secret containing SSL related files",
+ "name": "AMQ_SECRET",
+ "value": "amq-app-secret",
+ "required": true
+ },
+ {
+ "description": "SSL trust store filename",
+ "name": "AMQ_TRUSTSTORE",
+ "value": "broker.ts",
+ "required": true
+ },
+ {
+ "description": "SSL trust store password",
+ "name": "AMQ_TRUSTSTORE_PASSWORD",
+ "value": "",
+ "required": true
+ },
+ {
+ "description": "SSL key store filename",
+ "name": "AMQ_KEYSTORE",
+ "value": "broker.ks",
+ "required": true
+ },
+ {
+ "description": "Password for accessing SSL keystore",
+ "name": "AMQ_KEYSTORE_PASSWORD",
+ "value": "",
+ "required": true
+ },
+ {
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5672,
+ "targetPort": 5672
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-amqp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's AMQP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5671,
+ "targetPort": 5671
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-amqp-ssl",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's AMQP SSL port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 1883,
+ "targetPort": 1883
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-mqtt",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's MQTT port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8883,
+ "targetPort": 8883
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-mqtt-ssl",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's MQTT SSL port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61613,
+ "targetPort": 61613
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-stomp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's STOMP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61612,
+ "targetPort": 61612
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-stomp-ssl",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's STOMP SSL port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61617,
+ "targetPort": 61617
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp-ssl",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire (SSL) port."
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-62:1.1"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccount": "amq-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-62",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "broker-secret-volume",
+ "mountPath": "/etc/amq-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl -s -L -u ${AMQ_ADMIN_USERNAME}:${AMQ_ADMIN_PASSWORD} 'http://localhost:8161/hawtio/jolokia/read/org.apache.activemq:type=Broker,brokerName=*,service=Health/CurrentStatus' | grep -q '\"CurrentStatus\" *: *\"Good\"'"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp-ssl",
+ "containerPort": 5671,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt-ssl",
+ "containerPort": 8883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp-ssl",
+ "containerPort": 61612,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp-ssl",
+ "containerPort": 61617,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "AMQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "AMQ_ADMIN_USERNAME",
+ "value": "${AMQ_ADMIN_USERNAME}"
+ },
+ {
+ "name": "AMQ_ADMIN_PASSWORD",
+ "value": "${AMQ_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_KEYSTORE_TRUSTSTORE_DIR",
+ "value": "/etc/amq-secret-volume"
+ },
+ {
+ "name": "AMQ_TRUSTSTORE",
+ "value": "${AMQ_TRUSTSTORE}"
+ },
+ {
+ "name": "AMQ_TRUSTSTORE_PASSWORD",
+ "value": "${AMQ_TRUSTSTORE_PASSWORD}"
+ },
+ {
+ "name": "AMQ_KEYSTORE",
+ "value": "${AMQ_KEYSTORE}"
+ },
+ {
+ "name": "AMQ_KEYSTORE_PASSWORD",
+ "value": "${AMQ_KEYSTORE_PASSWORD}"
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "broker-secret-volume",
+ "secret": {
+ "secretName": "${AMQ_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
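The SSL variant above implies two pieces of preparation: a secret (AMQ_SECRET, default amq-app-secret) holding the keystore and truststore files named by AMQ_KEYSTORE/AMQ_TRUSTSTORE, mounted at /etc/amq-secret-volume, and the view role for the pod's service account when AMQ_MESH_DISCOVERY_TYPE=kube, as quoted in that parameter's description. A rough sketch under stated assumptions: a JDK keytool is available, the 3.x-era oc secrets new syntax applies, myproject stands in for the project name, and the amq-service-account referenced by the DeploymentConfig is created inline:

    # Self-signed keystore/truststore pair matching the template defaults (broker.ks / broker.ts).
    keytool -genkeypair -alias broker -keyalg RSA -keystore broker.ks \
            -storepass changeit -keypass changeit -dname "CN=broker"
    keytool -exportcert -alias broker -keystore broker.ks -storepass changeit -file broker.crt
    keytool -importcert -alias broker -noprompt -file broker.crt -keystore broker.ts -storepass changeit

    # Secret and service account consumed by the DeploymentConfig.
    oc secrets new amq-app-secret broker.ks broker.ts   # newer clients: oc create secret generic
    echo '{"apiVersion":"v1","kind":"ServiceAccount","metadata":{"name":"amq-service-account"}}' | oc create -f -

    # Only needed for kube-based mesh discovery; this DeploymentConfig runs as amq-service-account.
    oc policy add-role-to-user view system:serviceaccount:myproject:amq-service-account

    # When instantiating, pass AMQ_TRUSTSTORE_PASSWORD and AMQ_KEYSTORE_PASSWORD (changeit in this sketch).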
diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-amq-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-amq-persistent-s2i.json
new file mode 100644
index 000000000..2fc3b5b25
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-amq-persistent-s2i.json
@@ -0,0 +1,659 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for EAP 6 A-MQ applications with persistent storage built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,amq,javaee,java,messaging,jboss,xpaas",
+ "version": "1.1.0"
+ },
+ "name": "eap64-amq-persistent-s2i"
+ },
+ "labels": {
+ "template": "eap64-amq-persistent-s2i",
+ "xpaas": "1.1.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "APPLICATION_DOMAIN",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.1",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "helloworld-mdb",
+ "required": false
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/ConnectionFactory",
+ "name": "MQ_JNDI",
+ "value": "java:/ConnectionFactory",
+ "required": false
+ },
+ {
+ "description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_QUEUES",
+ "value": "HELLOWORLDMDBQueue",
+ "required": false
+ },
+ {
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_TOPICS",
+ "value": "HELLOWORLDMDBTopic",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "EAP_HTTPS_SECRET",
+ "value": "eap-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "EAP_HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "EAP_HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "EAP_HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "User name for broker admin. If left empty, it will be generated.",
+ "name": "AMQ_ADMIN_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Password for broker admin. If left empty, it will be generated.",
+ "name": "AMQ_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTPS port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTP service."
+ }
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTPS service."
+ }
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap64-openshift:1.1"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccount": "eap-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MQ_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-amq=MQ"
+ },
+ {
+ "name": "MQ_JNDI",
+ "value": "${MQ_JNDI}"
+ },
+ {
+ "name": "MQ_USERNAME",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "MQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "MQ_PROTOCOL",
+ "value": "tcp"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "EAP_HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "EAP_HTTPS_KEYSTORE",
+ "value": "${EAP_HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "EAP_HTTPS_NAME",
+ "value": "${EAP_HTTPS_NAME}"
+ },
+ {
+ "name": "EAP_HTTPS_PASSWORD",
+ "value": "${EAP_HTTPS_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${EAP_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-62:1.1"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-62",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl -s -L -u ${AMQ_ADMIN_USERNAME}:${AMQ_ADMIN_PASSWORD} 'http://localhost:8161/hawtio/jolokia/read/org.apache.activemq:type=Broker,brokerName=*,service=Health/CurrentStatus' | grep -q '\"CurrentStatus\" *: *\"Good\"'"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp-ssl",
+ "containerPort": 5671,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp-ssl",
+ "containerPort": 61612,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp-ssl",
+ "containerPort": 61617,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/opt/amq/data/kahadb",
+ "name": "${APPLICATION_NAME}-amq-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "AMQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "AMQ_ADMIN_USERNAME",
+ "value": "${AMQ_ADMIN_USERNAME}"
+ },
+ {
+ "name": "AMQ_ADMIN_PASSWORD",
+ "value": "${AMQ_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-amq-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-amq-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
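The BuildConfig in this template wires four triggers: GitHub and Generic webhooks with generated secrets, an ImageChange trigger on the jboss-eap64-openshift builder image, and ConfigChange. A hedged sketch of driving a build by hand and over the generic webhook; the endpoint path follows the OpenShift 3.x convention, and the master URL, project name, and secret value are placeholders:

    # Manual trigger for the default APPLICATION_NAME (eap-app).
    oc start-build eap-app
    oc get builds

    # Generic webhook trigger; substitute the GENERIC_WEBHOOK_SECRET generated at instantiation.
    MASTER=https://openshift.example.com:8443
    SECRET=secret101
    curl -k -X POST \
      "$MASTER/oapi/v1/namespaces/myproject/buildconfigs/eap-app/webhooks/$SECRET/generic"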
diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-amq-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-amq-s2i.json
new file mode 100644
index 000000000..a420bb1ea
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-amq-s2i.json
@@ -0,0 +1,619 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for EAP 6 A-MQ applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,amq,javaee,java,messaging,jboss,xpaas",
+ "version": "1.1.0"
+ },
+ "name": "eap64-amq-s2i"
+ },
+ "labels": {
+ "template": "eap64-amq-s2i",
+ "xpaas": "1.1.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "APPLICATION_DOMAIN",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.1",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "helloworld-mdb",
+ "required": false
+ },
+ {
+ "description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/ConnectionFactory",
+ "name": "MQ_JNDI",
+ "value": "java:/ConnectionFactory",
+ "required": false
+ },
+ {
+ "description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_QUEUES",
+ "value": "HELLOWORLDMDBQueue",
+ "required": false
+ },
+ {
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_TOPICS",
+ "value": "HELLOWORLDMDBTopic",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "EAP_HTTPS_SECRET",
+ "value": "eap-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "EAP_HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "EAP_HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "EAP_HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "description": "User name for broker admin. If left empty, it will be generated.",
+ "name": "AMQ_ADMIN_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Password for broker admin. If left empty, it will be generated.",
+ "name": "AMQ_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTPS port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTP service."
+ }
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTPS service."
+ }
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap64-openshift:1.1"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccount": "eap-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MQ_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-amq=MQ"
+ },
+ {
+ "name": "MQ_JNDI",
+ "value": "${MQ_JNDI}"
+ },
+ {
+ "name": "MQ_USERNAME",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "MQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "MQ_PROTOCOL",
+ "value": "tcp"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "EAP_HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "EAP_HTTPS_KEYSTORE",
+ "value": "${EAP_HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "EAP_HTTPS_NAME",
+ "value": "${EAP_HTTPS_NAME}"
+ },
+ {
+ "name": "EAP_HTTPS_PASSWORD",
+ "value": "${EAP_HTTPS_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${EAP_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-62:1.1"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-62",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl -s -L -u ${AMQ_ADMIN_USERNAME}:${AMQ_ADMIN_PASSWORD} 'http://localhost:8161/hawtio/jolokia/read/org.apache.activemq:type=Broker,brokerName=*,service=Health/CurrentStatus' | grep -q '\"CurrentStatus\" *: *\"Good\"'"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp-ssl",
+ "containerPort": 5671,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp-ssl",
+ "containerPort": 61612,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp-ssl",
+ "containerPort": 61617,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "AMQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "AMQ_ADMIN_USERNAME",
+ "value": "${AMQ_ADMIN_USERNAME}"
+ },
+ {
+ "name": "AMQ_ADMIN_PASSWORD",
+ "value": "${AMQ_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-basic-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-basic-s2i.json
new file mode 100644
index 000000000..3f90eb8be
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-basic-s2i.json
@@ -0,0 +1,305 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-jboss",
+ "description": "Application template for EAP 6 applications built using S2I.",
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.1.0"
+ },
+ "name": "eap64-basic-s2i"
+ },
+ "labels": {
+ "template": "eap64-basic-s2i",
+ "xpaas": "1.1.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "APPLICATION_DOMAIN",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-developer/jboss-eap-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "6.4.x",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "kitchensink",
+ "required": false
+ },
+ {
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap64-openshift:1.1"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
\ No newline at end of file
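Each EAP container in this template gates readiness on /opt/eap/bin/readinessProbe.sh and exposes a ping port (8888) used by the image for cluster discovery via OPENSHIFT_KUBE_PING_LABELS and OPENSHIFT_KUBE_PING_NAMESPACE. The same readiness check can be run by hand once a pod is up; the pod name below is illustrative:

    oc get pods -l application=eap-app            # pods carry the application=<APPLICATION_NAME> label
    oc exec eap-app-1-abcde -- /opt/eap/bin/readinessProbe.sh && echo "EAP reports ready"
    oc get endpoints eap-app                      # only pods passing the probe are listed behind the service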
diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-https-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-https-s2i.json
new file mode 100644
index 000000000..220d2f5b9
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-https-s2i.json
@@ -0,0 +1,413 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-jboss",
+ "description": "Application template for EAP 6 applications built using S2I.",
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.1.0"
+ },
+ "name": "eap64-https-s2i"
+ },
+ "labels": {
+ "template": "eap64-https-s2i",
+ "xpaas": "1.1.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "APPLICATION_DOMAIN",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-developer/jboss-eap-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "6.4.x",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "kitchensink",
+ "required": false
+ },
+ {
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "EAP_HTTPS_SECRET",
+ "value": "eap-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "EAP_HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "EAP_HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "EAP_HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap64-openshift:1.1"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccount": "eap-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "EAP_HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "EAP_HTTPS_KEYSTORE",
+ "value": "${EAP_HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "EAP_HTTPS_NAME",
+ "value": "${EAP_HTTPS_NAME}"
+ },
+ {
+ "name": "EAP_HTTPS_PASSWORD",
+ "value": "${EAP_HTTPS_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${EAP_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
\ No newline at end of file
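Unlike the basic variant, eap64-https-s2i requires EAP_HTTPS_SECRET: the secret (default eap-app-secret) must contain the keystore named by EAP_HTTPS_KEYSTORE, it is mounted read-only at /etc/eap-secret-volume, and the pod runs as eap-service-account. A preparation sketch, assuming a JDK keytool, the 3.x-era oc secrets new syntax, and illustrative certificate values:

    # Self-signed keystore matching the template defaults (keystore.jks inside secret eap-app-secret).
    keytool -genkeypair -alias eap-https -keyalg RSA -keystore keystore.jks \
            -storepass mykeystorepass -keypass mykeystorepass -dname "CN=eap-app.example.com"
    oc secrets new eap-app-secret keystore.jks       # newer clients: oc create secret generic
    echo '{"apiVersion":"v1","kind":"ServiceAccount","metadata":{"name":"eap-service-account"}}' | oc create -f -
    # Supply EAP_HTTPS_NAME=eap-https and EAP_HTTPS_PASSWORD=mykeystorepass when instantiating the template.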
diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-mongodb-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-mongodb-persistent-s2i.json
new file mode 100644
index 000000000..a1a3a9f2c
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-mongodb-persistent-s2i.json
@@ -0,0 +1,669 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for EAP 6 MongDB applications with persistent storage built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,mongodb,javaee,java,database,jboss,xpaas",
+ "version": "1.1.0"
+ },
+ "name": "eap64-mongodb-persistent-s2i"
+ },
+ "labels": {
+ "template": "eap64-mongodb-persistent-s2i",
+ "xpaas": "1.1.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "APPLICATION_DOMAIN",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.1",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-mongodb",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
+ "name": "DB_JNDI",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "EAP_HTTPS_SECRET",
+ "value": "eap-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "EAP_HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "EAP_HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "EAP_HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Disable data file preallocation.",
+ "name": "MONGODB_NOPREALLOC",
+ "required": false
+ },
+ {
+ "description": "Set MongoDB to use a smaller default data file size.",
+ "name": "MONGODB_SMALLFILES",
+ "required": false
+ },
+ {
+ "description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
+ "name": "MONGODB_QUIET",
+ "required": false
+ },
+ {
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database admin password",
+ "name": "DB_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 27017,
+ "targetPort": 27017
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap64-openshift:1.1"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccount": "eap-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mongodb=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "EAP_HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "EAP_HTTPS_KEYSTORE",
+ "value": "${EAP_HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "EAP_HTTPS_NAME",
+ "value": "${EAP_HTTPS_NAME}"
+ },
+ {
+ "name": "EAP_HTTPS_PASSWORD",
+ "value": "${EAP_HTTPS_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${EAP_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mongodb"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mongodb:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "image": "mongodb",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 27017,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/mongodb/data",
+ "name": "${APPLICATION_NAME}-mongodb-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "MONGODB_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_NOPREALLOC",
+ "value": "${MONGODB_NOPREALLOC}"
+ },
+ {
+ "name": "MONGODB_SMALLFILES",
+ "value": "${MONGODB_SMALLFILES}"
+ },
+ {
+ "name": "MONGODB_QUIET",
+ "value": "${MONGODB_QUIET}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-mongodb-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-mongodb-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+} \ No newline at end of file
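Usage sketch (illustrative, not part of the patch): once a template such as eap64-mongodb-persistent-s2i.json above is available, it can be instantiated with the standard oc client. The parameter values below are placeholders, and the parameter flag differs between client versions (-p/--param on newer clients, -v/--value on the origin 1.x client this repo targets):

    # Process the template file added above and create the resulting objects;
    # APPLICATION_NAME and VOLUME_CAPACITY values are illustrative only.
    oc process -f eap64-mongodb-persistent-s2i.json \
        -p APPLICATION_NAME=todolist \
        -p VOLUME_CAPACITY=1Gi \
      | oc create -f -

Parameters not overridden on the command line fall back to the defaults or generated expressions defined in the "parameters" section.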
diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-mongodb-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-mongodb-s2i.json
new file mode 100644
index 000000000..dfd1443ed
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-mongodb-s2i.json
@@ -0,0 +1,629 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for EAP 6 MongDB applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,mongodb,javaee,java,database,jboss,xpaas",
+ "version": "1.1.0"
+ },
+ "name": "eap64-mongodb-s2i"
+ },
+ "labels": {
+ "template": "eap64-mongodb-s2i",
+ "xpaas": "1.1.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "APPLICATION_DOMAIN",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.1",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-mongodb",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
+ "name": "DB_JNDI",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "EAP_HTTPS_SECRET",
+ "value": "eap-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "EAP_HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "EAP_HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "EAP_HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Disable data file preallocation.",
+ "name": "MONGODB_NOPREALLOC",
+ "required": false
+ },
+ {
+ "description": "Set MongoDB to use a smaller default data file size.",
+ "name": "MONGODB_SMALLFILES",
+ "required": false
+ },
+ {
+ "description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
+ "name": "MONGODB_QUIET",
+ "required": false
+ },
+ {
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database admin password",
+ "name": "DB_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 27017,
+ "targetPort": 27017
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap64-openshift:1.1"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccount": "eap-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mongodb=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "EAP_HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "EAP_HTTPS_KEYSTORE",
+ "value": "${EAP_HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "EAP_HTTPS_NAME",
+ "value": "${EAP_HTTPS_NAME}"
+ },
+ {
+ "name": "EAP_HTTPS_PASSWORD",
+ "value": "${EAP_HTTPS_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${EAP_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mongodb"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mongodb:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "image": "mongodb",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 27017,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MONGODB_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_NOPREALLOC",
+ "value": "${MONGODB_NOPREALLOC}"
+ },
+ {
+ "name": "MONGODB_SMALLFILES",
+ "value": "${MONGODB_SMALLFILES}"
+ },
+ {
+ "name": "MONGODB_QUIET",
+ "value": "${MONGODB_QUIET}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+} \ No newline at end of file
diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-mysql-persistent-s2i.json
new file mode 100644
index 000000000..fdd368a5f
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-mysql-persistent-s2i.json
@@ -0,0 +1,676 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for EAP 6 MySQL applications with persistent storage built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,mysql,javaee,java,database,jboss,xpaas",
+ "version": "1.1.0"
+ },
+ "name": "eap64-mysql-persistent-s2i"
+ },
+ "labels": {
+ "template": "eap64-mysql-persistent-s2i",
+ "xpaas": "1.1.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "APPLICATION_DOMAIN",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.1",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mysql",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/TodoListDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "EAP_HTTPS_SECRET",
+ "value": "eap-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "EAP_HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "EAP_HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "EAP_HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap64-openshift:1.1"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccount": "eap-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "EAP_HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "EAP_HTTPS_KEYSTORE",
+ "value": "${EAP_HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "EAP_HTTPS_NAME",
+ "value": "${EAP_HTTPS_NAME}"
+ },
+ {
+ "name": "EAP_HTTPS_PASSWORD",
+ "value": "${EAP_HTTPS_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${EAP_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/mysql/data",
+ "name": "${APPLICATION_NAME}-mysql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-mysql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-mysql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+} \ No newline at end of file
diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-mysql-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-mysql-s2i.json
new file mode 100644
index 000000000..ff6bdc112
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-mysql-s2i.json
@@ -0,0 +1,636 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for EAP 6 MySQL applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,mysql,javaee,java,database,jboss,xpaas",
+ "version": "1.1.0"
+ },
+ "name": "eap64-mysql-s2i"
+ },
+ "labels": {
+ "template": "eap64-mysql-s2i",
+ "xpaas": "1.1.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "APPLICATION_DOMAIN",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.1",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mysql",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/TodoListDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "EAP_HTTPS_SECRET",
+ "value": "eap-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "EAP_HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "EAP_HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "EAP_HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap64-openshift:1.1"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccount": "eap-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "EAP_HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "EAP_HTTPS_KEYSTORE",
+ "value": "${EAP_HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "EAP_HTTPS_NAME",
+ "value": "${EAP_HTTPS_NAME}"
+ },
+ {
+ "name": "EAP_HTTPS_PASSWORD",
+ "value": "${EAP_HTTPS_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${EAP_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+} \ No newline at end of file
diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-postgresql-persistent-s2i.json
new file mode 100644
index 000000000..6443afdb0
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-postgresql-persistent-s2i.json
@@ -0,0 +1,649 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for EAP 6 PostgreSQL applications with persistent storage built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,postgresql,javaee,java,database,jboss,xpaas",
+ "version": "1.1.0"
+ },
+ "name": "eap64-postgresql-persistent-s2i"
+ },
+ "labels": {
+ "template": "eap64-postgresql-persistent-s2i",
+ "xpaas": "1.1.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "APPLICATION_DOMAIN",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.1",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/TodoListDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "EAP_HTTPS_SECRET",
+ "value": "eap-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "EAP_HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "EAP_HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "EAP_HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap64-openshift:1.1"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccount": "eap-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "EAP_HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "EAP_HTTPS_KEYSTORE",
+ "value": "${EAP_HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "EAP_HTTPS_NAME",
+ "value": "${EAP_HTTPS_NAME}"
+ },
+ {
+ "name": "EAP_HTTPS_PASSWORD",
+ "value": "${EAP_HTTPS_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${EAP_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/pgsql/data",
+ "name": "${APPLICATION_NAME}-postgresql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-postgresql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
\ No newline at end of file
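The template above is instantiated with the OpenShift client rather than applied directly. Below is a minimal sketch (not part of this repository) of driving that from Python, assuming the `oc` CLI is installed and logged in to a project; the file name, helper name and parameter values are illustrative placeholders, while the parameter names come from the template's "parameters" list.

# Hypothetical helper: render a template with `oc process` and create the resulting
# objects, i.e. the Python equivalent of `oc process -f <file> -p K=V ... | oc create -f -`.
import subprocess

def create_from_template(path, **params):
    cmd = ["oc", "process", "-f", path]
    for name, value in params.items():
        cmd += ["-p", "%s=%s" % (name, value)]
    rendered = subprocess.run(cmd, check=True, capture_output=True, text=True).stdout
    subprocess.run(["oc", "create", "-f", "-"], input=rendered, check=True, text=True)

create_from_template(
    "eap64-postgresql-persistent-s2i.json",  # assumed local copy of the file added above
    APPLICATION_NAME="eap-app",              # values taken from the template's defaults
    VOLUME_CAPACITY="512Mi",
)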
diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-postgresql-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-postgresql-s2i.json
new file mode 100644
index 000000000..e879e51cf
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/eap64-postgresql-s2i.json
@@ -0,0 +1,609 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for EAP 6 PostgreSQL applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,postgresql,javaee,java,database,jboss,xpaas",
+ "version": "1.1.0"
+ },
+ "name": "eap64-postgresql-s2i"
+ },
+ "labels": {
+ "template": "eap64-postgresql-s2i",
+ "xpaas": "1.1.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "APPLICATION_DOMAIN",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.1",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/TodoListDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the keystore file",
+ "name": "EAP_HTTPS_SECRET",
+ "value": "eap-app-secret",
+ "required": false
+ },
+ {
+ "description": "The name of the keystore file within the secret",
+ "name": "EAP_HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "description": "The name associated with the server certificate",
+ "name": "EAP_HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "The password for the keystore and certificate",
+ "name": "EAP_HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap64-openshift:1.1"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccount": "eap-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "EAP_HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "EAP_HTTPS_KEYSTORE",
+ "value": "${EAP_HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "EAP_HTTPS_NAME",
+ "value": "${EAP_HTTPS_NAME}"
+ },
+ {
+ "name": "EAP_HTTPS_PASSWORD",
+ "value": "${EAP_HTTPS_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${EAP_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
\ No newline at end of file
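Several parameters in the template above (DB_MIN_POOL_SIZE, DB_MAX_POOL_SIZE, DB_TX_ISOLATION, POSTGRESQL_MAX_CONNECTIONS, POSTGRESQL_SHARED_BUFFERS) declare neither a default "value" nor a "generate" rule, so they remain empty unless the caller passes them. A short local check is sketched below, assuming a copy of the JSON file on disk; the file name is illustrative.

# List template parameters that carry neither a default "value" nor a "generate" rule.
import json

def unset_parameters(template_path):
    with open(template_path) as fh:
        template = json.load(fh)
    return [
        p["name"]
        for p in template.get("parameters", [])
        if "value" not in p and "generate" not in p
    ]

print(unset_parameters("eap64-postgresql-s2i.json"))
# -> ['DB_MIN_POOL_SIZE', 'DB_MAX_POOL_SIZE', 'DB_TX_ISOLATION',
#     'POSTGRESQL_MAX_CONNECTIONS', 'POSTGRESQL_SHARED_BUFFERS']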
diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-basic-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-basic-s2i.json
new file mode 100644
index 000000000..729079130
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-basic-s2i.json
@@ -0,0 +1,279 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS applications built using S2I.",
+ "tags": "tomcat,tomcat7,java,jboss,xpaas",
+ "version": "1.1.0"
+ },
+ "name": "jws30-tomcat7-basic-s2i"
+ },
+ "labels": {
+ "template": "jws30-tomcat7-basic-s2i",
+ "xpaas": "1.1.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "APPLICATION_DOMAIN",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.1",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "tomcat-websocket-chat",
+ "required": false
+ },
+ {
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat7-openshift:1.1"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
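Parameters such as JWS_ADMIN_USERNAME, JWS_ADMIN_PASSWORD, GITHUB_WEBHOOK_SECRET and GENERIC_WEBHOOK_SECRET above use "generate": "expression" with "from": "[a-zA-Z0-9]{8}", meaning the template processor fills in a random value matching that pattern at processing time. A rough, purely illustrative local equivalent:

# Approximates what a "generate": "expression" parameter with "from": "[a-zA-Z0-9]{8}"
# produces when the template is processed; not the processor's actual implementation.
import secrets
import string

def generated_value(length=8, alphabet=string.ascii_letters + string.digits):
    return "".join(secrets.choice(alphabet) for _ in range(length))

print(generated_value())  # e.g. 'aB3kQ9xZ'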
diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-https-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-https-s2i.json
new file mode 100644
index 000000000..7ce7e7fe2
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-https-s2i.json
@@ -0,0 +1,387 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS applications built using S2I.",
+ "tags": "tomcat,tomcat7,java,jboss,xpaas",
+ "version": "1.1.0"
+ },
+ "name": "jws30-tomcat7-https-s2i"
+ },
+ "labels": {
+ "template": "jws30-tomcat7-https-s2i",
+ "xpaas": "1.1.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "APPLICATION_DOMAIN",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.1",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "tomcat-websocket-chat",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat7-openshift:1.1"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccount": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json
new file mode 100644
index 000000000..9a08ec0b0
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-mongodb-persistent-s2i.json
@@ -0,0 +1,643 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS MongoDB applications with persistent storage built using S2I.",
+ "tags": "tomcat,tomcat7,mongodb,java,database,jboss,xpaas",
+ "version": "1.1.0"
+ },
+ "name": "jws30-tomcat7-mongodb-persistent-s2i"
+ },
+ "labels": {
+ "template": "jws30-tomcat7-mongodb-persistent-s2i",
+ "xpaas": "1.1.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "APPLICATION_DOMAIN",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.1",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-mongodb",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
+ "name": "DB_JNDI",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Disable data file preallocation.",
+ "name": "MONGODB_NOPREALLOC",
+ "required": false
+ },
+ {
+ "description": "Set MongoDB to use a smaller default data file size.",
+ "name": "MONGODB_SMALLFILES",
+ "required": false
+ },
+ {
+ "description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
+ "name": "MONGODB_QUIET",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database admin password",
+ "name": "DB_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 27017,
+ "targetPort": 27017
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat7-openshift:1.1"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccount": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mongodb=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mongodb"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mongodb:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "image": "mongodb",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 27017,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/mongodb/data",
+ "name": "${APPLICATION_NAME}-mongodb-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "MONGODB_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_NOPREALLOC",
+ "value": "${MONGODB_NOPREALLOC}"
+ },
+ {
+ "name": "MONGODB_SMALLFILES",
+ "value": "${MONGODB_SMALLFILES}"
+ },
+ {
+ "name": "MONGODB_QUIET",
+ "value": "${MONGODB_QUIET}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-mongodb-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-mongodb-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
\ No newline at end of file
diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-mongodb-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-mongodb-s2i.json
new file mode 100644
index 000000000..b8dfb3ad3
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-mongodb-s2i.json
@@ -0,0 +1,603 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS MongoDB applications built using S2I.",
+ "tags": "tomcat,tomcat7,mongodb,java,database,jboss,xpaas",
+ "version": "1.1.0"
+ },
+ "name": "jws30-tomcat7-mongodb-s2i"
+ },
+ "labels": {
+ "template": "jws30-tomcat7-mongodb-s2i",
+ "xpaas": "1.1.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "APPLICATION_DOMAIN",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.1",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-mongodb",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
+ "name": "DB_JNDI",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Disable data file preallocation.",
+ "name": "MONGODB_NOPREALLOC",
+ "required": false
+ },
+ {
+ "description": "Set MongoDB to use a smaller default data file size.",
+ "name": "MONGODB_SMALLFILES",
+ "required": false
+ },
+ {
+ "description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
+ "name": "MONGODB_QUIET",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database admin password",
+ "name": "DB_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 27017,
+ "targetPort": 27017
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat7-openshift:1.1"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccount": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mongodb=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mongodb"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mongodb:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "image": "mongodb",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 27017,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MONGODB_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_NOPREALLOC",
+ "value": "${MONGODB_NOPREALLOC}"
+ },
+ {
+ "name": "MONGODB_SMALLFILES",
+ "value": "${MONGODB_SMALLFILES}"
+ },
+ {
+ "name": "MONGODB_QUIET",
+ "value": "${MONGODB_QUIET}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
\ No newline at end of file
diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json
new file mode 100644
index 000000000..d36e330d3
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-mysql-persistent-s2i.json
@@ -0,0 +1,645 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS MySQL applications with persistent storage built using S2I.",
+ "tags": "tomcat,tomcat7,mysql,java,database,jboss,xpaas",
+ "version": "1.1.0"
+ },
+ "name": "jws30-tomcat7-mysql-persistent-s2i"
+ },
+ "labels": {
+ "template": "jws30-tomcat7-mysql-persistent-s2i",
+ "xpaas": "1.1.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "APPLICATION_DOMAIN",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.1",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/TodoListDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat7-openshift:1.1"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccount": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/mysql/data",
+ "name": "${APPLICATION_NAME}-mysql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-mysql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-mysql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
\ No newline at end of file
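For orientation: processing a template (for example with oc process or from the web console) yields a v1 List whose items are the objects above with every ${PARAMETER} reference substituted. A trimmed, illustrative excerpt for this template, with APPLICATION_NAME left at its default, might look like:

{
    "kind": "List",
    "apiVersion": "v1",
    "items": [
        {
            "kind": "ImageStream",
            "apiVersion": "v1",
            "metadata": {
                "name": "jws-app",
                "labels": {
                    "application": "jws-app"
                }
            }
        }
    ]
}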
diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-mysql-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-mysql-s2i.json
new file mode 100644
index 000000000..f5309db60
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-mysql-s2i.json
@@ -0,0 +1,605 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS MySQL applications built using S2I.",
+ "tags": "tomcat,tomcat7,mysql,java,database,jboss,xpaas",
+ "version": "1.1.0"
+ },
+ "name": "jws30-tomcat7-mysql-s2i"
+ },
+ "labels": {
+ "template": "jws30-tomcat7-mysql-s2i",
+ "xpaas": "1.1.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "APPLICATION_DOMAIN",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.1",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/TodoListDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat7-openshift:1.1"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccount": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
\ No newline at end of file
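The web-tier DeploymentConfigs above run their pods under "serviceAccount": "jws-service-account", which these templates do not define. Assuming the account only needs to exist so the pods can be admitted and mount the certificate secret, a minimal sketch is:

{
    "apiVersion": "v1",
    "kind": "ServiceAccount",
    "metadata": {
        "name": "jws-service-account"
    }
}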
diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json
new file mode 100644
index 000000000..ee88a4c69
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-postgresql-persistent-s2i.json
@@ -0,0 +1,618 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS PostgreSQL applications with persistent storage built using S2I.",
+ "tags": "tomcat,tomcat7,postgresql,java,database,jboss,xpaas",
+ "version": "1.1.0"
+ },
+ "name": "jws30-tomcat7-postgresql-persistent-s2i"
+ },
+ "labels": {
+ "template": "jws30-tomcat7-postgresql-persistent-s2i",
+ "xpaas": "1.1.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "APPLICATION_DOMAIN",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.1",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/TodoListDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat7-openshift:1.1"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccount": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/pgsql/data",
+ "name": "${APPLICATION_NAME}-postgresql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-postgresql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
\ No newline at end of file
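The PersistentVolumeClaims in the persistent templates only bind if the cluster offers a PersistentVolume of at least VOLUME_CAPACITY with a compatible access mode. A minimal hostPath sketch for a single-node test setup (the volume name and path are placeholders; a real cluster would typically back this with NFS or another shared storage backend):

{
    "apiVersion": "v1",
    "kind": "PersistentVolume",
    "metadata": {
        "name": "jws-db-pv"
    },
    "spec": {
        "capacity": {
            "storage": "512Mi"
        },
        "accessModes": [
            "ReadWriteOnce"
        ],
        "hostPath": {
            "path": "/tmp/jws-db-pv"
        }
    }
}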
diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-postgresql-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-postgresql-s2i.json
new file mode 100644
index 000000000..f5940a7a1
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat7-postgresql-s2i.json
@@ -0,0 +1,578 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS PostgreSQL applications built using S2I.",
+ "tags": "tomcat,tomcat7,postgresql,java,database,jboss,xpaas",
+ "version": "1.1.0"
+ },
+ "name": "jws30-tomcat7-postgresql-s2i"
+ },
+ "labels": {
+ "template": "jws30-tomcat7-postgresql-s2i",
+ "xpaas": "1.1.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "APPLICATION_DOMAIN",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.1",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/TodoListDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat7-openshift:1.1"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccount": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
\ No newline at end of file
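A quick reminder of how the "generate": "expression" parameters used throughout these templates behave: the "from" pattern is expanded once at processing time, and the result is substituted for the parameter everywhere it is referenced, so each instantiation gets its own credentials. For example, a declaration like

{
    "name": "DB_PASSWORD",
    "from": "[a-zA-Z0-9]{8}",
    "generate": "expression"
}

could resolve to a value such as "v4Jq8LpZ" (an illustrative eight-character alphanumeric string, not a real default).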
diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-basic-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-basic-s2i.json
new file mode 100644
index 000000000..b24ce40ae
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-basic-s2i.json
@@ -0,0 +1,279 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS applications built using S2I.",
+ "tags": "tomcat,tomcat8,java,jboss,xpaas",
+ "version": "1.1.0"
+ },
+ "name": "jws30-tomcat8-basic-s2i"
+ },
+ "labels": {
+ "template": "jws30-tomcat8-basic-s2i",
+ "xpaas": "1.1.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "APPLICATION_DOMAIN",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.1",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "tomcat-websocket-chat",
+ "required": false
+ },
+ {
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat8-openshift:1.1"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-https-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-https-s2i.json
new file mode 100644
index 000000000..7e788d0db
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-https-s2i.json
@@ -0,0 +1,387 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS applications built using S2I.",
+ "tags": "tomcat,tomcat8,java,jboss,xpaas",
+ "version": "1.1.0"
+ },
+ "name": "jws30-tomcat8-https-s2i"
+ },
+ "labels": {
+ "template": "jws30-tomcat8-https-s2i",
+ "xpaas": "1.1.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "APPLICATION_DOMAIN",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.1",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "tomcat-websocket-chat",
+ "required": false
+ },
+ {
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat8-openshift:1.1"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccount": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json
new file mode 100644
index 000000000..2f1d69c75
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-mongodb-persistent-s2i.json
@@ -0,0 +1,643 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS MongoDB applications with persistent storage built using S2I.",
+ "tags": "tomcat,tomcat8,mongodb,java,database,jboss,xpaas",
+ "version": "1.1.0"
+ },
+ "name": "jws30-tomcat8-mongodb-persistent-s2i"
+ },
+ "labels": {
+ "template": "jws30-tomcat8-mongodb-persistent-s2i",
+ "xpaas": "1.1.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "APPLICATION_DOMAIN",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.1",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-mongodb",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
+ "name": "DB_JNDI",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Disable data file preallocation.",
+ "name": "MONGODB_NOPREALLOC",
+ "required": false
+ },
+ {
+ "description": "Set MongoDB to use a smaller default data file size.",
+ "name": "MONGODB_SMALLFILES",
+ "required": false
+ },
+ {
+ "description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
+ "name": "MONGODB_QUIET",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database admin password",
+ "name": "DB_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 27017,
+ "targetPort": 27017
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat8-openshift:1.1"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccount": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mongodb=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mongodb"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mongodb:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "image": "mongodb",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 27017,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/mongodb/data",
+ "name": "${APPLICATION_NAME}-mongodb-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "MONGODB_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_NOPREALLOC",
+ "value": "${MONGODB_NOPREALLOC}"
+ },
+ {
+ "name": "MONGODB_SMALLFILES",
+ "value": "${MONGODB_SMALLFILES}"
+ },
+ {
+ "name": "MONGODB_QUIET",
+ "value": "${MONGODB_QUIET}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-mongodb-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-mongodb-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
\ No newline at end of file
diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-mongodb-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-mongodb-s2i.json
new file mode 100644
index 000000000..bad676f2e
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-mongodb-s2i.json
@@ -0,0 +1,603 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS MongoDB applications built using S2I.",
+ "tags": "tomcat,tomcat8,mongodb,java,database,jboss,xpaas",
+ "version": "1.1.0"
+ },
+ "name": "jws30-tomcat8-mongodb-s2i"
+ },
+ "labels": {
+ "template": "jws30-tomcat8-mongodb-s2i",
+ "xpaas": "1.1.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "APPLICATION_DOMAIN",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.1",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-mongodb",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
+ "name": "DB_JNDI",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Disable data file preallocation.",
+ "name": "MONGODB_NOPREALLOC",
+ "required": false
+ },
+ {
+ "description": "Set MongoDB to use a smaller default data file size.",
+ "name": "MONGODB_SMALLFILES",
+ "required": false
+ },
+ {
+ "description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
+ "name": "MONGODB_QUIET",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database admin password",
+ "name": "DB_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 27017,
+ "targetPort": 27017
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat8-openshift:1.1"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccount": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mongodb=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mongodb"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mongodb:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "image": "mongodb",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 27017,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MONGODB_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_NOPREALLOC",
+ "value": "${MONGODB_NOPREALLOC}"
+ },
+ {
+ "name": "MONGODB_SMALLFILES",
+ "value": "${MONGODB_SMALLFILES}"
+ },
+ {
+ "name": "MONGODB_QUIET",
+ "value": "${MONGODB_QUIET}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
\ No newline at end of file
diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json
new file mode 100644
index 000000000..e20a45982
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-mysql-persistent-s2i.json
@@ -0,0 +1,645 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS MySQL applications with persistent storage built using S2I.",
+ "tags": "tomcat,tomcat8,mysql,java,database,jboss,xpaas",
+ "version": "1.1.0"
+ },
+ "name": "jws30-tomcat8-mysql-persistent-s2i"
+ },
+ "labels": {
+ "template": "jws30-tomcat8-mysql-persistent-s2i",
+ "xpaas": "1.1.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "APPLICATION_DOMAIN",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.1",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/TodoListDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat8-openshift:1.1"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccount": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/mysql/data",
+ "name": "${APPLICATION_NAME}-mysql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-mysql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-mysql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
\ No newline at end of file
diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-mysql-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-mysql-s2i.json
new file mode 100644
index 000000000..1b9624756
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-mysql-s2i.json
@@ -0,0 +1,605 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS MySQL applications built using S2I.",
+ "tags": "tomcat,tomcat8,mysql,java,database,jboss,xpaas",
+ "version": "1.1.0"
+ },
+ "name": "jws30-tomcat8-mysql-s2i"
+ },
+ "labels": {
+ "template": "jws30-tomcat8-mysql-s2i",
+ "xpaas": "1.1.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "APPLICATION_DOMAIN",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.1",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/TodoListDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat8-openshift:1.1"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccount": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+} \ No newline at end of file
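The template above is ultimately handed to the OpenShift template processor (oc process or the web console), which resolves each entry in "parameters" and then substitutes ${NAME} references throughout "objects"; parameters marked generate: expression (the webhook secrets and credentials here) receive random values matching their "from" pattern. As a rough illustration only, not the actual OpenShift implementation, the Python sketch below resolves parameters the same way for the simple [a-zA-Z0-9]{n} patterns these templates use; process_template and its overrides argument are hypothetical names.

import json
import random
import re
import string


def generate_from_expression(expr):
    """Handle the tiny subset of generator syntax these templates use,
    e.g. '[a-zA-Z0-9]{8}' or 'user[a-zA-Z0-9]{3}'."""
    match = re.match(r'^(?P<prefix>[^\[]*)\[a-zA-Z0-9\]\{(?P<count>\d+)\}$', expr)
    if not match:
        raise ValueError("unsupported generator expression: %s" % expr)
    alphabet = string.ascii_letters + string.digits
    suffix = ''.join(random.choice(alphabet) for _ in range(int(match.group('count'))))
    return match.group('prefix') + suffix


def process_template(template, overrides=None):
    """Resolve parameter values (supplied, generated, or defaulted), then
    substitute ${NAME} references throughout the template's objects."""
    overrides = overrides or {}
    values = {}
    for param in template.get('parameters', []):
        name = param['name']
        if name in overrides:
            values[name] = overrides[name]
        elif param.get('generate') == 'expression':
            values[name] = generate_from_expression(param['from'])
        else:
            values[name] = param.get('value', '')
    raw = json.dumps(template.get('objects', []))
    for name, value in values.items():
        raw = raw.replace('${%s}' % name, value)
    return json.loads(raw)

# objects = process_template(template_dict, {"APPLICATION_NAME": "todo"})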
diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json
new file mode 100644
index 000000000..dc492a38e
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-postgresql-persistent-s2i.json
@@ -0,0 +1,618 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS PostgreSQL applications with persistent storage built using S2I.",
+ "tags": "tomcat,tomcat8,postgresql,java,database,jboss,xpaas",
+ "version": "1.1.0"
+ },
+ "name": "jws30-tomcat8-postgresql-persistent-s2i"
+ },
+ "labels": {
+ "template": "jws30-tomcat8-postgresql-persistent-s2i",
+ "xpaas": "1.1.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "APPLICATION_DOMAIN",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.1",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/TodoListDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat8-openshift:1.1"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccount": "jws-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/pgsql/data",
+ "name": "${APPLICATION_NAME}-postgresql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-postgresql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+} \ No newline at end of file
diff --git a/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-postgresql-s2i.json b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-postgresql-s2i.json
new file mode 100644
index 000000000..242b37a79
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.1/xpaas-templates/jws30-tomcat8-postgresql-s2i.json
@@ -0,0 +1,576 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-tomcat",
+ "description": "Application template for JWS PostgreSQL applications built using S2I.",
+ "tags": "tomcat,tomcat8,postgresql,java,database,jboss,xpaas",
+ "version": "1.1.0"
+ },
+ "name": "jws30-tomcat8-postgresql-s2i"
+ },
+ "labels": {
+ "template": "jws30-tomcat8-postgresql-s2i",
+ "xpaas": "1.1.0"
+ },
+ "parameters": [
+ {
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "jws-app",
+ "required": true
+ },
+ {
+ "description": "Custom hostname for service routes. Leave blank for default hostname, e.g.: <application-name>.<project>.<default-domain-suffix>",
+ "name": "APPLICATION_DOMAIN",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.1",
+ "required": false
+ },
+ {
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/TodoListDS",
+ "required": false
+ },
+ {
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "description": "The name of the secret containing the certificate files",
+ "name": "JWS_HTTPS_SECRET",
+ "value": "jws-app-secret",
+ "required": true
+ },
+ {
+ "description": "The name of the certificate file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "server.crt",
+ "required": false
+ },
+ {
+ "description": "The name of the certificate key file within the secret",
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "server.key",
+ "required": false
+ },
+ {
+ "description": "The certificate password",
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin User",
+ "name": "JWS_ADMIN_USERNAME",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "JWS Admin Password",
+ "name": "JWS_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${APPLICATION_DOMAIN}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-webserver30-tomcat8-openshift:1.1"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStream",
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccount": "jws-service-account",
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "curl -s -u ${JWS_ADMIN_USERNAME}:${JWS_ADMIN_PASSWORD} 'http://localhost:8080/manager/jmxproxy/?get=Catalina%3Atype%3DServer&att=stateName' |grep -iq 'stateName *= *STARTED'"
+ ]
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "jws-certificate-volume",
+ "mountPath": "/etc/jws-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "ports": [
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_DIR",
+ "value": "/etc/jws-secret-volume"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE",
+ "value": "${JWS_HTTPS_CERTIFICATE}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_KEY",
+ "value": "${JWS_HTTPS_CERTIFICATE_KEY}"
+ },
+ {
+ "name": "JWS_HTTPS_CERTIFICATE_PASSWORD",
+ "value": "${JWS_HTTPS_CERTIFICATE_PASSWORD}"
+ },
+ {
+ "name": "JWS_ADMIN_USERNAME",
+ "value": "${JWS_ADMIN_USERNAME}"
+ },
+ {
+ "name": "JWS_ADMIN_PASSWORD",
+ "value": "${JWS_ADMIN_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "jws-certificate-volume",
+ "secret": {
+ "secretName": "${JWS_HTTPS_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+} \ No newline at end of file
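Both JWS templates in this patch use the same readiness probe: a shell command that curls Tomcat's manager jmxproxy servlet with the generated admin credentials and greps for stateName = STARTED. For readers who want to reproduce the check by hand, a rough Python 2 equivalent is sketched below; it is illustrative only, not part of the templates, and the environment variable names come from the container spec above.

import base64
import os
import re
import urllib2  # Python 2, matching the rest of this repository


def tomcat_is_started(host="localhost", port=8080, user=None, password=None):
    """Return True when Catalina reports stateName=STARTED via the manager's
    jmxproxy servlet, the same condition the readiness probe greps for."""
    user = user or os.environ.get("JWS_ADMIN_USERNAME", "")
    password = password or os.environ.get("JWS_ADMIN_PASSWORD", "")
    url = ("http://%s:%d/manager/jmxproxy/"
           "?get=Catalina%%3Atype%%3DServer&att=stateName" % (host, port))
    request = urllib2.Request(url)
    credentials = base64.b64encode("%s:%s" % (user, password))
    request.add_header("Authorization", "Basic %s" % credentials)
    try:
        body = urllib2.urlopen(request, timeout=5).read()
    except Exception:  # connection refused, 401, etc. all mean "not ready"
        return False
    return re.search(r"stateName\s*=\s*STARTED", body, re.IGNORECASE) is not None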
diff --git a/roles/openshift_examples/tasks/main.yml b/roles/openshift_examples/tasks/main.yml
index 0b4784bae..9a5eebc66 100644
--- a/roles/openshift_examples/tasks/main.yml
+++ b/roles/openshift_examples/tasks/main.yml
@@ -1,8 +1,8 @@
---
- name: Copy openshift examples
copy:
- src: examples
- dest: /usr/share/openshift
+ src: "examples/{{ content_version }}/"
+ dest: "{{ examples_base }}/"
# RHEL and Centos image streams are mutually exclusive
- name: Import RHEL streams
diff --git a/roles/openshift_expand_partition/README.md b/roles/openshift_expand_partition/README.md
index cd394e1ba..aed4ec871 100644
--- a/roles/openshift_expand_partition/README.md
+++ b/roles/openshift_expand_partition/README.md
@@ -8,7 +8,7 @@ partition, and then expanding the file system on the partition.
* A machine with a disk that is not fully utilized
-* cloud-utils-growpart rpm (either installed or avialable via yum)
+* cloud-utils-growpart rpm (either installed or available via yum or dnf)
* The partition you are expanding needs to be at the end of the partition list
diff --git a/roles/openshift_expand_partition/tasks/main.yml b/roles/openshift_expand_partition/tasks/main.yml
index 8bc399070..42e7903fd 100644
--- a/roles/openshift_expand_partition/tasks/main.yml
+++ b/roles/openshift_expand_partition/tasks/main.yml
@@ -1,6 +1,11 @@
---
- name: Ensure growpart is installed
yum: pkg=cloud-utils-growpart state=present
+ when: ansible_pkg_mgr == "yum"
+
+- name: Ensure growpart is installed
+ dnf: pkg=cloud-utils-growpart state=present
+ when: ansible_pkg_mgr == "dnf"
- name: Grow the partitions
command: "growpart {{oep_drive}} {{oep_partition}}"
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 2e1075aca..8b3402729 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -24,8 +24,23 @@ import StringIO
import yaml
from distutils.util import strtobool
from distutils.version import LooseVersion
-from netaddr import IPNetwork
+import struct
+import socket
+def first_ip(network):
+ """ Return the first IPv4 address in network
+
+ Args:
+ network (str): network in CIDR format
+ Returns:
+ str: first IPv4 address
+ """
+ atoi = lambda addr: struct.unpack("!I", socket.inet_aton(addr))[0]
+ itoa = lambda addr: socket.inet_ntoa(struct.pack("!I", addr))
+
+ (address, netmask) = network.split('/')
+ netmask_i = (0xffffffff << (32 - atoi(netmask))) & 0xffffffff
+ return itoa((atoi(address) & netmask_i) + 1)
def hostname_valid(hostname):
""" Test if specified hostname should be considered valid
@@ -513,9 +528,9 @@ def set_aggregate_facts(facts):
internal_hostnames.add(facts['common']['hostname'])
internal_hostnames.add(facts['common']['ip'])
+ cluster_domain = facts['common']['dns_domain']
+
if 'master' in facts:
- # FIXME: not sure why but facts['dns']['domain'] fails
- cluster_domain = 'cluster.local'
if 'cluster_hostname' in facts['master']:
all_hostnames.add(facts['master']['cluster_hostname'])
if 'cluster_public_hostname' in facts['master']:
@@ -525,7 +540,7 @@ def set_aggregate_facts(facts):
'kubernetes.default.svc', 'kubernetes.default.svc.' + cluster_domain]
all_hostnames.update(svc_names)
internal_hostnames.update(svc_names)
- first_svc_ip = str(IPNetwork(facts['master']['portal_net'])[1])
+ first_svc_ip = first_ip(facts['master']['portal_net'])
all_hostnames.add(first_svc_ip)
internal_hostnames.add(first_svc_ip)
@@ -543,8 +558,10 @@ def set_etcd_facts_if_unset(facts):
If anything goes wrong parsing these, the fact will not be set.
"""
- if 'etcd' in facts:
- if 'master' in facts and facts['master']['embedded_etcd']:
+ if 'master' in facts and facts['master']['embedded_etcd']:
+ etcd_facts = facts['etcd'] if 'etcd' in facts else dict()
+
+ if 'etcd_data_dir' not in etcd_facts:
try:
# Parse master config to find actual etcd data dir:
master_cfg_path = os.path.join(facts['common']['config_base'],
@@ -553,28 +570,37 @@ def set_etcd_facts_if_unset(facts):
config = yaml.safe_load(master_cfg_f.read())
master_cfg_f.close()
- facts['etcd']['etcd_data_dir'] = \
+ etcd_facts['etcd_data_dir'] = \
config['etcdConfig']['storageDirectory']
+
+ facts['etcd'] = etcd_facts
+
# We don't want exceptions bubbling up here:
# pylint: disable=broad-except
except Exception:
pass
- else:
- # Read ETCD_DATA_DIR from /etc/etcd/etcd.conf:
- try:
- # Add a fake section for parsing:
- ini_str = '[root]\n' + open('/etc/etcd/etcd.conf', 'r').read()
- ini_fp = StringIO.StringIO(ini_str)
- config = ConfigParser.RawConfigParser()
- config.readfp(ini_fp)
- etcd_data_dir = config.get('root', 'ETCD_DATA_DIR')
- if etcd_data_dir.startswith('"') and etcd_data_dir.endswith('"'):
- etcd_data_dir = etcd_data_dir[1:-1]
- facts['etcd']['etcd_data_dir'] = etcd_data_dir
- # We don't want exceptions bubbling up here:
- # pylint: disable=broad-except
- except Exception:
- pass
+ else:
+ etcd_facts = facts['etcd'] if 'etcd' in facts else dict()
+
+ # Read ETCD_DATA_DIR from /etc/etcd/etcd.conf:
+ try:
+ # Add a fake section for parsing:
+ ini_str = '[root]\n' + open('/etc/etcd/etcd.conf', 'r').read()
+ ini_fp = StringIO.StringIO(ini_str)
+ config = ConfigParser.RawConfigParser()
+ config.readfp(ini_fp)
+ etcd_data_dir = config.get('root', 'ETCD_DATA_DIR')
+ if etcd_data_dir.startswith('"') and etcd_data_dir.endswith('"'):
+ etcd_data_dir = etcd_data_dir[1:-1]
+
+ etcd_facts['etcd_data_dir'] = etcd_data_dir
+ facts['etcd'] = etcd_facts
+
+ # We don't want exceptions bubbling up here:
+ # pylint: disable=broad-except
+ except Exception:
+ pass
+
return facts
def set_deployment_facts_if_unset(facts):
@@ -597,7 +623,7 @@ def set_deployment_facts_if_unset(facts):
service_type = 'atomic-openshift'
if deployment_type == 'origin':
service_type = 'origin'
- elif deployment_type in ['enterprise', 'online']:
+ elif deployment_type in ['enterprise']:
service_type = 'openshift'
facts['common']['service_type'] = service_type
if 'config_base' not in facts['common']:
@@ -625,7 +651,7 @@ def set_deployment_facts_if_unset(facts):
if deployment_type in ['enterprise', 'online', 'openshift-enterprise']:
registry_url = 'openshift3/ose-${component}:${version}'
elif deployment_type == 'atomic-enterprise':
- registry_url = 'aep3/aep-${component}:${version}'
+ registry_url = 'aep3_beta/aep-${component}:${version}'
facts[role]['registry_url'] = registry_url
if 'master' in facts:
@@ -838,20 +864,38 @@ def apply_provider_facts(facts, provider_facts):
return facts
-def merge_facts(orig, new):
+def merge_facts(orig, new, additive_facts_to_overwrite):
""" Recursively merge facts dicts
Args:
orig (dict): existing facts
new (dict): facts to update
+
+ additive_facts_to_overwrite (list): additive facts to overwrite in jinja
+ '.' notation ex: ['master.named_certificates']
+
Returns:
dict: the merged facts
"""
+ additive_facts = ['named_certificates']
facts = dict()
for key, value in orig.iteritems():
if key in new:
if isinstance(value, dict) and isinstance(new[key], dict):
- facts[key] = merge_facts(value, new[key])
+ relevant_additive_facts = []
+ # Keep additive_facts_to_overwrite if key matches
+ for item in additive_facts_to_overwrite:
+ if '.' in item and item.startswith(key + '.'):
+ relevant_additive_facts.append(item)
+ facts[key] = merge_facts(value, new[key], relevant_additive_facts)
+ elif key in additive_facts and key not in [x.split('.')[-1] for x in additive_facts_to_overwrite]:
+ # Fact is additive so we'll combine orig and new.
+ if isinstance(value, list) and isinstance(new[key], list):
+ new_fact = []
+ for item in copy.deepcopy(value) + copy.copy(new[key]):
+ if item not in new_fact:
+ new_fact.append(item)
+ facts[key] = new_fact
else:
facts[key] = copy.copy(new[key])
else:
@@ -935,13 +979,15 @@ class OpenShiftFacts(object):
role (str): role for setting local facts
filename (str): local facts file to use
local_facts (dict): local facts to set
+ additive_facts_to_overwrite (list): additive facts to overwrite in jinja
+ '.' notation ex: ['master.named_certificates']
Raises:
OpenShiftFactsUnsupportedRoleError:
"""
- known_roles = ['common', 'master', 'node', 'master_sdn', 'node_sdn', 'dns', 'etcd']
+ known_roles = ['common', 'master', 'node', 'master_sdn', 'node_sdn', 'etcd']
- def __init__(self, role, filename, local_facts):
+ def __init__(self, role, filename, local_facts, additive_facts_to_overwrite=False):
self.changed = False
self.filename = filename
if role not in self.known_roles:
@@ -950,25 +996,27 @@ class OpenShiftFacts(object):
)
self.role = role
self.system_facts = ansible_facts(module)
- self.facts = self.generate_facts(local_facts)
+ self.facts = self.generate_facts(local_facts, additive_facts_to_overwrite)
- def generate_facts(self, local_facts):
+ def generate_facts(self, local_facts, additive_facts_to_overwrite):
""" Generate facts
Args:
local_facts (dict): local_facts for overriding generated
defaults
+ additive_facts_to_overwrite (list): additive facts to overwrite in jinja
+ '.' notation ex: ['master.named_certificates']
Returns:
dict: The generated facts
"""
- local_facts = self.init_local_facts(local_facts)
+ local_facts = self.init_local_facts(local_facts, additive_facts_to_overwrite)
roles = local_facts.keys()
defaults = self.get_defaults(roles)
provider_facts = self.init_provider_facts()
facts = apply_provider_facts(defaults, provider_facts)
- facts = merge_facts(facts, local_facts)
+ facts = merge_facts(facts, local_facts, additive_facts_to_overwrite)
facts['current_config'] = get_current_config(facts)
facts = set_url_facts_if_unset(facts)
facts = set_project_cfg_facts_if_unset(facts)
@@ -1005,9 +1053,10 @@ class OpenShiftFacts(object):
common = dict(use_openshift_sdn=True, ip=ip_addr, public_ip=ip_addr,
deployment_type='origin', hostname=hostname,
- public_hostname=hostname)
+ public_hostname=hostname, use_manageiq=False)
common['client_binary'] = 'oc' if os.path.isfile('/usr/bin/oc') else 'osc'
common['admin_binary'] = 'oadm' if os.path.isfile('/usr/bin/oadm') else 'osadm'
+ common['dns_domain'] = 'cluster.local'
defaults['common'] = common
if 'master' in roles:
@@ -1026,9 +1075,8 @@ class OpenShiftFacts(object):
if 'node' in roles:
node = dict(labels={}, annotations={}, portal_net='172.30.0.0/16',
- iptables_sync_period='5s')
+ iptables_sync_period='5s', set_node_ip=False)
defaults['node'] = node
-
return defaults
def guess_host_provider(self):
@@ -1106,11 +1154,13 @@ class OpenShiftFacts(object):
)
return provider_facts
- def init_local_facts(self, facts=None):
+ def init_local_facts(self, facts=None, additive_facts_to_overwrite=False):
""" Initialize the provider facts
Args:
facts (dict): local facts to set
+ additive_facts_to_overwrite (list): additive facts to overwrite in jinja
+ '.' notation ex: ['master.named_certificates']
Returns:
dict: The result of merging the provided facts with existing
@@ -1128,7 +1178,7 @@ class OpenShiftFacts(object):
basestring):
facts_to_set[arg] = module.from_json(facts_to_set[arg])
- new_local_facts = merge_facts(local_facts, facts_to_set)
+ new_local_facts = merge_facts(local_facts, facts_to_set, additive_facts_to_overwrite)
for facts in new_local_facts.values():
keys_to_delete = []
for fact, value in facts.iteritems():
@@ -1158,6 +1208,7 @@ def main():
role=dict(default='common', required=False,
choices=OpenShiftFacts.known_roles),
local_facts=dict(default=None, type='dict', required=False),
+ additive_facts_to_overwrite=dict(default=[], type='list', required=False),
),
supports_check_mode=True,
add_file_common_args=True,
@@ -1165,9 +1216,10 @@ def main():
role = module.params['role']
local_facts = module.params['local_facts']
+ additive_facts_to_overwrite = module.params['additive_facts_to_overwrite']
fact_file = '/etc/ansible/facts.d/openshift.fact'
- openshift_facts = OpenShiftFacts(role, fact_file, local_facts)
+ openshift_facts = OpenShiftFacts(role, fact_file, local_facts, additive_facts_to_overwrite)
file_params = module.params.copy()
file_params['path'] = fact_file
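Two of the openshift_facts.py changes above are easy to miss in the diff noise: the python-netaddr dependency is replaced by a small stdlib CIDR calculation, and merge_facts now treats facts such as named_certificates as additive lists that are combined rather than overwritten (unless explicitly listed in additive_facts_to_overwrite). A standalone sketch of both ideas follows; first_ip mirrors the helper added above, while merge_additive is an illustrative name for the additive branch of merge_facts, and the usage lines assume the default portal_net of 172.30.0.0/16.

import copy
import socket
import struct


def first_ip(network):
    """Return the first host address in an IPv4 CIDR block,
    e.g. '172.30.0.0/16' -> '172.30.0.1'."""
    addr_to_int = lambda a: struct.unpack("!I", socket.inet_aton(a))[0]
    int_to_addr = lambda i: socket.inet_ntoa(struct.pack("!I", i))
    address, prefix = network.split('/')
    netmask = (0xffffffff << (32 - int(prefix))) & 0xffffffff
    return int_to_addr((addr_to_int(address) & netmask) + 1)


def merge_additive(orig, new, overwrite=False):
    """Combine two list-valued facts the way the additive branch of
    merge_facts does: keep each item once, preserving order, unless the
    caller asked for the fact to be overwritten outright."""
    if overwrite:
        return copy.copy(new)
    merged = []
    for item in copy.deepcopy(orig) + copy.copy(new):
        if item not in merged:
            merged.append(item)
    return merged


print(first_ip("172.30.0.0/16"))  # 172.30.0.1, the first service IP of the default portal_net
existing = [{'certfile': 'a.crt', 'names': ['master1.example.com']}]
incoming = [{'certfile': 'b.crt', 'names': ['master2.example.com']}]
print(merge_additive(existing, incoming))                  # both certificates are kept
print(merge_additive(existing, incoming, overwrite=True))  # only the new list survives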
diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml
index a28aa7ba2..2e889d7d5 100644
--- a/roles/openshift_facts/tasks/main.yml
+++ b/roles/openshift_facts/tasks/main.yml
@@ -6,10 +6,16 @@
- ansible_version | version_compare('1.9.0', 'ne')
- ansible_version | version_compare('1.9.0.1', 'ne')
-- name: Ensure python-netaddr and PyYaml are installed
+- name: Ensure PyYaml is installed
yum: pkg={{ item }} state=installed
+ when: ansible_pkg_mgr == "yum"
+ with_items:
+ - PyYAML
+
+- name: Ensure PyYaml is installed
+ dnf: pkg={{ item }} state=installed
+ when: ansible_pkg_mgr == "dnf"
with_items:
- - python-netaddr
- PyYAML
- name: Gather Cluster facts
diff --git a/roles/openshift_manageiq/tasks/main.yaml b/roles/openshift_manageiq/tasks/main.yaml
new file mode 100644
index 000000000..2d3187e21
--- /dev/null
+++ b/roles/openshift_manageiq/tasks/main.yaml
@@ -0,0 +1,50 @@
+---
+- name: Copy Configuration to temporary conf
+ command: >
+ cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{manage_iq_tmp_conf}}
+ changed_when: false
+
+- name: Add Management Infrastructure project
+ command: >
+ {{ openshift.common.admin_binary }} new-project
+ management-infra
+ --description="Management Infrastructure"
+ --config={{manage_iq_tmp_conf}}
+ register: osmiq_create_mi_project
+ failed_when: "'already exists' not in osmiq_create_mi_project.stderr and osmiq_create_mi_project.rc != 0"
+ changed_when: osmiq_create_mi_project.rc == 0
+
+- name: Create Service Account
+ shell: >
+ echo {{ manageiq_service_account | to_json | quote }} |
+ {{ openshift.common.client_binary }} create
+ -n management-infra
+ --config={{manage_iq_tmp_conf}}
+ -f -
+ register: osmiq_create_service_account
+ failed_when: "'already exists' not in osmiq_create_service_account.stderr and osmiq_create_service_account.rc != 0"
+ changed_when: osmiq_create_service_account.rc == 0
+
+- name: Create Cluster Role
+ shell: >
+ echo {{ manageiq_cluster_role | to_json | quote }} |
+ {{ openshift.common.client_binary }} create
+ --config={{manage_iq_tmp_conf}}
+ -f -
+ register: osmiq_create_cluster_role
+ failed_when: "'already exists' not in osmiq_create_cluster_role.stderr and osmiq_create_cluster_role.rc != 0"
+ changed_when: osmiq_create_cluster_role.rc == 0
+
+- name: Configure role/user permissions
+ command: >
+ {{ openshift.common.admin_binary }} {{item}}
+ --config={{manage_iq_tmp_conf}}
+ with_items: "{{manage_iq_tasks}}"
+ register: osmiq_perm_task
+ failed_when: "'already exists' not in osmiq_perm_task.stderr and osmiq_perm_task.rc != 0"
+ changed_when: osmiq_perm_task.rc == 0
+
+- name: Clean temporary configuration file
+ command: >
+ rm -f {{manage_iq_tmp_conf}}
+ changed_when: false
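Each manageiq task above follows the same idempotence pattern: pipe a YAML-defined resource through to_json into the client's create -f -, then treat an "already exists" error as unchanged rather than failed. A rough Python 2 sketch of that pattern is shown below, using the same flags the tasks pass; the create_if_missing wrapper itself is a hypothetical helper, and the client binary would be whatever openshift.common.client_binary resolves to (oc or osc).

import json
import subprocess


def create_if_missing(client, definition, namespace=None, kubeconfig=None):
    """Feed a resource definition to '<client> create ... -f -' and treat an
    'already exists' error as success, mirroring the failed_when/changed_when
    logic above. Returns True only when the object was newly created."""
    cmd = [client, "create"]
    if namespace:
        cmd += ["-n", namespace]
    if kubeconfig:
        cmd += ["--config=%s" % kubeconfig]
    cmd += ["-f", "-"]
    proc = subprocess.Popen(cmd, stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    _, err = proc.communicate(json.dumps(definition))
    if proc.returncode == 0:
        return True
    if "already exists" in err:
        return False
    raise RuntimeError(err.strip())

# created = create_if_missing("oc", service_account_dict,
#                             namespace="management-infra",
#                             kubeconfig="/tmp/manageiq_admin.kubeconfig")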
diff --git a/roles/openshift_manageiq/vars/main.yml b/roles/openshift_manageiq/vars/main.yml
new file mode 100644
index 000000000..77e1c304b
--- /dev/null
+++ b/roles/openshift_manageiq/vars/main.yml
@@ -0,0 +1,24 @@
+manageiq_cluster_role:
+ apiVersion: v1
+ kind: ClusterRole
+ metadata:
+ name: management-infra-admin
+ rules:
+ - resources:
+ - pods/proxy
+ verbs:
+ - '*'
+
+manageiq_service_account:
+ apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: management-admin
+
+manage_iq_tmp_conf: /tmp/manageiq_admin.kubeconfig
+
+manage_iq_tasks:
+ - policy add-role-to-user -n management-infra admin -z management-admin
+ - policy add-role-to-user -n management-infra management-infra-admin -z management-admin
+ - policy add-cluster-role-to-user cluster-reader system:serviceaccount:management-infra:management-admin
+ - policy add-scc-to-user privileged system:serviceaccount:management-infra:management-admin
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index 185bfb8f3..8a78f8f2a 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -1,13 +1,16 @@
---
-# TODO: add validation for openshift_master_identity_providers
# TODO: add ability to configure certificates given either a local file to
# point to or certificate contents, set in default cert locations.
-- assert:
- that:
- - openshift_master_oauth_grant_method in openshift_master_valid_grant_methods
- when: openshift_master_oauth_grant_method is defined
+# Authentication Variable Validation
+# TODO: validate the different identity provider kinds as well
+- fail:
+ msg: >
+ Invalid OAuth grant method: {{ openshift_master_oauth_grant_method }}
+ when: openshift_master_oauth_grant_method is defined and openshift_master_oauth_grant_method not in openshift_master_valid_grant_methods
+
+# HA Variable Validation
- fail:
msg: "openshift_master_cluster_method must be set to either 'native' or 'pacemaker' for multi-master installations"
when: openshift_master_ha | bool and ((openshift_master_cluster_method is not defined) or (openshift_master_cluster_method is defined and openshift_master_cluster_method not in ["native", "pacemaker"]))
@@ -76,16 +79,16 @@
- name: Install Master package
yum: pkg={{ openshift.common.service_type }}-master{{ openshift_version }} state=present
+ when: ansible_pkg_mgr == "yum"
+ register: install_result
+
+- name: Install Master package
+ dnf: pkg={{ openshift.common.service_type }}-master{{ openshift_version }} state=present
+ when: ansible_pkg_mgr == "dnf"
register: install_result
-# TODO: These values need to be configurable
-- name: Set dns facts
+- name: Re-gather package dependent master facts
openshift_facts:
- role: dns
- local_facts:
- ip: "{{ openshift_master_cluster_vip | default(openshift.common.ip, true) | default(None) }}"
- domain: cluster.local
- when: openshift.master.embedded_dns
- name: Create config parent directory if it does not exist
file:
@@ -115,7 +118,12 @@
- name: Install httpd-tools if needed
yum: pkg=httpd-tools state=present
- when: item.kind == 'HTPasswdPasswordIdentityProvider'
+ when: (ansible_pkg_mgr == "yum") and (item.kind == 'HTPasswdPasswordIdentityProvider')
+ with_items: openshift.master.identity_providers
+
+- name: Install httpd-tools if needed
+ dnf: pkg=httpd-tools state=present
+ when: (ansible_pkg_mgr == "dnf") and (item.kind == 'HTPasswdPasswordIdentityProvider')
with_items: openshift.master.identity_providers
- name: Ensure htpasswd directory exists
@@ -172,6 +180,9 @@
- restart master
- restart master api
+- set_fact:
+ translated_identity_providers: "{{ openshift.master.identity_providers | translate_idps('v1') }}"
+
# TODO: add the validate parameter when there is a validation command to run
- name: Create master config
template:
@@ -257,7 +268,12 @@
- name: Install cluster packages
yum: pkg=pcs state=present
- when: openshift_master_ha | bool and openshift.master.cluster_method == 'pacemaker'
+ when: (ansible_pkg_mgr == "yum") and openshift_master_ha | bool and openshift.master.cluster_method == 'pacemaker'
+ register: install_result
+
+- name: Install cluster packages
+ dnf: pkg=pcs state=present
+ when: (ansible_pkg_mgr == "dnf") and openshift_master_ha | bool and openshift.master.cluster_method == 'pacemaker'
register: install_result
- name: Start and enable cluster service
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index bb12a0a0f..cadb02fa3 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -27,9 +27,6 @@ corsAllowedOrigins:
{% for custom_origin in openshift.master.custom_cors_origins | default("") %}
- {{ custom_origin }}
{% endfor %}
-{% for name in (named_certificates | map(attribute='names')) | list | oo_flatten %}
- - {{ name }}
-{% endfor %}
{% if 'disabled_features' in openshift.master %}
disabledFeatures: {{ openshift.master.disabled_features | to_json }}
{% endif %}
@@ -86,7 +83,7 @@ kubernetesMasterConfig:
{% endif %}
apiServerArguments: {{ api_server_args if api_server_args is defined else 'null' }}
controllerArguments: {{ controller_args if controller_args is defined else 'null' }}
- masterCount: {{ openshift.master.master_count }}
+ masterCount: {{ openshift.master.master_count if openshift.master.cluster_method | default(None) == 'native' else 1 }}
masterIP: {{ openshift.common.ip }}
podEvictionTimeout: ""
proxyClientInfo:
@@ -110,7 +107,24 @@ networkConfig:
{% endif %}
# serviceNetworkCIDR must match kubernetesMasterConfig.servicesSubnet
serviceNetworkCIDR: {{ openshift.master.portal_net }}
-{% include 'v1_partials/oauthConfig.j2' %}
+oauthConfig:
+ assetPublicURL: {{ openshift.master.public_console_url }}/
+ grantConfig:
+ method: {{ openshift.master.oauth_grant_method }}
+ identityProviders:
+{% for line in translated_identity_providers.splitlines() %}
+ {{ line }}
+{% endfor %}
+ masterCA: ca.crt
+ masterPublicURL: {{ openshift.master.public_api_url }}
+ masterURL: {{ openshift.master.api_url }}
+ sessionConfig:
+ sessionMaxAgeSeconds: {{ openshift.master.session_max_seconds }}
+ sessionName: {{ openshift.master.session_name }}
+ sessionSecretsFile: {{ openshift.master.session_secrets_file }}
+ tokenConfig:
+ accessTokenMaxAgeSeconds: {{ openshift.master.access_token_max_seconds }}
+ authorizeTokenMaxAgeSeconds: {{ openshift.master.auth_token_max_seconds }}
pauseControllers: false
policyConfig:
bootstrapPolicyFile: {{ openshift_master_policy }}
@@ -144,9 +158,9 @@ servingInfo:
keyFile: master.server.key
maxRequestsInFlight: 500
requestTimeoutSeconds: 3600
-{% if named_certificates %}
+{% if openshift.master.named_certificates %}
namedCertificates:
-{% for named_certificate in named_certificates %}
+{% for named_certificate in openshift.master.named_certificates %}
- certFile: {{ named_certificate['certfile'] }}
keyFile: {{ named_certificate['keyfile'] }}
names:
diff --git a/roles/openshift_master/templates/v1_partials/oauthConfig.j2 b/roles/openshift_master/templates/v1_partials/oauthConfig.j2
deleted file mode 100644
index 8a4f5a746..000000000
--- a/roles/openshift_master/templates/v1_partials/oauthConfig.j2
+++ /dev/null
@@ -1,93 +0,0 @@
-{% macro identity_provider_config(identity_provider) %}
- apiVersion: v1
- kind: {{ identity_provider.kind }}
-{% if identity_provider.kind == 'HTPasswdPasswordIdentityProvider' %}
- file: {{ identity_provider.filename }}
-{% elif identity_provider.kind == 'BasicAuthPasswordIdentityProvider' %}
- url: {{ identity_provider.url }}
-{% for key in ('ca', 'certFile', 'keyFile') %}
-{% if key in identity_provider %}
- {{ key }}: "{{ identity_provider[key] }}"
-{% endif %}
-{% endfor %}
-{% elif identity_provider.kind == 'LDAPPasswordIdentityProvider' %}
- attributes:
-{% for attribute_key in identity_provider.attributes %}
- {{ attribute_key }}:
-{% for attribute_value in identity_provider.attributes[attribute_key] %}
- - {{ attribute_value }}
-{% endfor %}
-{% endfor %}
-{% for key in ('bindDN', 'bindPassword', 'ca') %}
- {{ key }}: "{{ identity_provider[key] }}"
-{% endfor %}
-{% for key in ('insecure', 'url') %}
- {{ key }}: {{ identity_provider[key] }}
-{% endfor %}
-{% elif identity_provider.kind == 'RequestHeaderIdentityProvider' %}
- headers: {{ identity_provider.headers }}
-{% if 'clientCA' in identity_provider %}
- clientCA: {{ identity_provider.clientCA }}
-{% endif %}
-{% elif identity_provider.kind == 'GitHubIdentityProvider' %}
- clientID: {{ identity_provider.clientID }}
- clientSecret: {{ identity_provider.clientSecret }}
-{% elif identity_provider.kind == 'GoogleIdentityProvider' %}
- clientID: {{ identity_provider.clientID }}
- clientSecret: {{ identity_provider.clientSecret }}
-{% if 'hostedDomain' in identity_provider %}
- hostedDomain: {{ identity_provider.hostedDomain }}
-{% endif %}
-{% elif identity_provider.kind == 'OpenIDIdentityProvider' %}
- clientID: {{ identity_provider.clientID }}
- clientSecret: {{ identity_provider.clientSecret }}
- claims:
- id: identity_provider.claims.id
-{% for claim_key in ('preferredUsername', 'name', 'email') %}
-{% if claim_key in identity_provider.claims %}
- {{ claim_key }}: {{ identity_provider.claims[claim_key] }}
-{% endif %}
-{% endfor %}
- urls:
- authorize: {{ identity_provider.urls.authorize }}
- token: {{ identity_provider.urls.token }}
-{% if 'userInfo' in identity_provider.urls %}
- userInfo: {{ identity_provider.userInfo }}
-{% endif %}
-{% if 'extraScopes' in identity_provider %}
- extraScopes:
-{% for scope in identity_provider.extraScopes %}
- - {{ scope }}
-{% endfor %}
-{% endif %}
-{% if 'extraAuthorizeParameters' in identity_provider %}
- extraAuthorizeParameters:
-{% for param_key, param_value in identity_provider.extraAuthorizeParameters.iteritems() %}
- {{ param_key }}: {{ param_value }}
-{% endfor %}
-{% endif %}
-{% endif %}
-{% endmacro %}
-oauthConfig:
- assetPublicURL: {{ openshift.master.public_console_url }}/
- grantConfig:
- method: {{ openshift.master.oauth_grant_method }}
- identityProviders:
-{% for identity_provider in openshift.master.identity_providers %}
- - name: {{ identity_provider.name }}
- challenge: {{ identity_provider.challenge }}
- login: {{ identity_provider.login }}
- provider:
-{{ identity_provider_config(identity_provider) }}
-{%- endfor %}
- masterCA: ca.crt
- masterPublicURL: {{ openshift.master.public_api_url }}
- masterURL: {{ openshift.master.api_url }}
- sessionConfig:
- sessionMaxAgeSeconds: {{ openshift.master.session_max_seconds }}
- sessionName: {{ openshift.master.session_name }}
- sessionSecretsFile: {{ openshift.master.session_secrets_file }}
- tokenConfig:
- accessTokenMaxAgeSeconds: {{ openshift.master.access_token_max_seconds }}
- authorizeTokenMaxAgeSeconds: {{ openshift.master.auth_token_max_seconds }}
-{# Comment to preserve newline after authorizeTokenMaxAgeSeconds #}
diff --git a/roles/openshift_master_ca/tasks/main.yml b/roles/openshift_master_ca/tasks/main.yml
index 314f068e7..caac13be3 100644
--- a/roles/openshift_master_ca/tasks/main.yml
+++ b/roles/openshift_master_ca/tasks/main.yml
@@ -1,6 +1,12 @@
---
- name: Install the base package for admin tooling
yum: pkg={{ openshift.common.service_type }}{{ openshift_version }} state=present
+ when: ansible_pkg_mgr == "yum"
+ register: install_result
+
+- name: Install the base package for admin tooling
+ dnf: pkg={{ openshift.common.service_type }}{{ openshift_version }} state=present
+ when: ansible_pkg_mgr == "dnf"
register: install_result
- name: Reload generated facts
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index d11bc5123..29e7eb532 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -1,12 +1,6 @@
---
# TODO: allow for overriding default ports where possible
- fail:
- msg: This role requres that osn_cluster_dns_domain is set
- when: osn_cluster_dns_domain is not defined or not osn_cluster_dns_domain
-- fail:
- msg: This role requres that osn_cluster_dns_ip is set
- when: osn_cluster_dns_ip is not defined or not osn_cluster_dns_ip
-- fail:
msg: "SELinux is disabled, This deployment type requires that SELinux is enabled."
when: (not ansible_selinux or ansible_selinux.status != 'enabled') and deployment_type in ['enterprise', 'online', 'atomic-enterprise', 'openshift-enterprise']
@@ -20,6 +14,7 @@
hostname: "{{ openshift_hostname | default(none) }}"
public_hostname: "{{ openshift_public_hostname | default(none) }}"
deployment_type: "{{ openshift_deployment_type }}"
+ dns_ip: "{{ openshift_dns_ip | default(openshift_master_cluster_vip | default(None, true), true) }}"
- role: node
local_facts:
annotations: "{{ openshift_node_annotations | default(none) }}"
@@ -34,17 +29,29 @@
schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}"
sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"
storage_plugin_deps: "{{ osn_storage_plugin_deps | default(None) }}"
+ set_node_ip: "{{ openshift_set_node_ip | default(None) }}"
# We have to add tuned-profiles in the same transaction otherwise we run into depsolving
# problems because the rpms don't pin the version properly.
- name: Install Node package
yum: pkg={{ openshift.common.service_type }}-node{{ openshift_version }},tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_version }} state=present
+ when: ansible_pkg_mgr == "yum"
+ register: node_install_result
+
+- name: Install Node package
+ dnf: pkg={{ openshift.common.service_type }}-node{{ openshift_version }},tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_version }} state=present
+ when: ansible_pkg_mgr == "dnf"
register: node_install_result
- name: Install sdn-ovs package
yum: pkg={{ openshift.common.service_type }}-sdn-ovs{{ openshift_version }} state=present
register: sdn_install_result
- when: openshift.common.use_openshift_sdn
+ when: ansible_pkg_mgr == "yum" and openshift.common.use_openshift_sdn
+
+- name: Install sdn-ovs package
+ dnf: pkg={{ openshift.common.service_type }}-sdn-ovs{{ openshift_version }} state=present
+ register: sdn_install_result
+ when: ansible_pkg_mgr == "dnf" and openshift.common.use_openshift_sdn
# TODO: add the validate parameter when there is a validation command to run
- name: Create the Node config
diff --git a/roles/openshift_node/tasks/storage_plugins/ceph.yml b/roles/openshift_node/tasks/storage_plugins/ceph.yml
index b6936618a..b5146dcac 100644
--- a/roles/openshift_node/tasks/storage_plugins/ceph.yml
+++ b/roles/openshift_node/tasks/storage_plugins/ceph.yml
@@ -3,3 +3,10 @@
yum:
pkg: ceph-common
state: installed
+ when: ansible_pkg_mgr == "yum"
+
+- name: Install Ceph storage plugin dependencies
+ dnf:
+ pkg: ceph-common
+ state: installed
+ when: ansible_pkg_mgr == "dnf"
diff --git a/roles/openshift_node/tasks/storage_plugins/glusterfs.yml b/roles/openshift_node/tasks/storage_plugins/glusterfs.yml
index b812e81df..a357023e1 100644
--- a/roles/openshift_node/tasks/storage_plugins/glusterfs.yml
+++ b/roles/openshift_node/tasks/storage_plugins/glusterfs.yml
@@ -3,10 +3,22 @@
yum:
pkg: glusterfs-fuse
state: installed
+ when: ansible_pkg_mgr == "yum"
-- name: Set seboolean to allow gluster storage plugin access from containers
+- name: Install GlusterFS storage plugin dependencies
+ dnf:
+ pkg: glusterfs-fuse
+ state: installed
+ when: ansible_pkg_mgr == "dnf"
+
+- name: Set sebooleans to allow gluster storage plugin access from containers
seboolean:
- name: virt_use_fusefs
+ name: "{{ item }}"
state: yes
persistent: yes
when: ansible_selinux and ansible_selinux.status == "enabled"
+ with_items:
+ - virt_use_fusefs
+ - virt_sandbox_use_fusefs
+ register: sebool_result
+ failed_when: "'state' not in sebool_result and 'msg' in sebool_result and 'SELinux boolean {{ item }} does not exist' not in sebool_result.msg"
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
index 7d2f506e3..23bd81f91 100644
--- a/roles/openshift_node/templates/node.yaml.v1.j2
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -1,7 +1,9 @@
allowDisabledDocker: false
apiVersion: v1
-dnsDomain: {{ osn_cluster_dns_domain }}
-dnsIP: {{ osn_cluster_dns_ip }}
+dnsDomain: {{ openshift.common.dns_domain }}
+{% if 'dns_ip' in openshift.common %}
+dnsIP: {{ openshift.common.dns_ip }}
+{% endif %}
dockerConfig:
execHandlerName: ""
iptablesSyncPeriod: "{{ openshift.node.iptables_sync_period }}"
@@ -23,7 +25,9 @@ networkConfig:
{% if openshift.common.use_openshift_sdn %}
networkPluginName: {{ openshift.common.sdn_network_plugin_name }}
{% endif %}
+{% if openshift.node.set_node_ip | bool %}
nodeIP: {{ openshift.common.ip }}
+{% endif %}
nodeName: {{ openshift.common.hostname | lower }}
podManifestConfig:
servingInfo:
diff --git a/roles/openshift_repos/files/fedora-origin/repos/maxamillion-fedora-openshift-fedora.repo b/roles/openshift_repos/files/fedora-origin/repos/maxamillion-fedora-openshift-fedora.repo
new file mode 100644
index 000000000..bc0435d82
--- /dev/null
+++ b/roles/openshift_repos/files/fedora-origin/repos/maxamillion-fedora-openshift-fedora.repo
@@ -0,0 +1,8 @@
+[maxamillion-fedora-openshift]
+name=Copr repo for fedora-openshift owned by maxamillion
+baseurl=https://copr-be.cloud.fedoraproject.org/results/maxamillion/fedora-openshift/fedora-$releasever-$basearch/
+skip_if_unavailable=True
+gpgcheck=1
+gpgkey=https://copr-be.cloud.fedoraproject.org/results/maxamillion/fedora-openshift/pubkey.gpg
+enabled=1
+enabled_metadata=1 \ No newline at end of file
diff --git a/roles/openshift_repos/handlers/main.yml b/roles/openshift_repos/handlers/main.yml
new file mode 100644
index 000000000..fed4ab2f0
--- /dev/null
+++ b/roles/openshift_repos/handlers/main.yml
@@ -0,0 +1,6 @@
+---
+- name: refresh yum cache
+ command: yum clean all
+
+- name: refresh dnf cache
+ command: dnf clean all
diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml
index aa696ae12..c55b5df89 100644
--- a/roles/openshift_repos/tasks/main.yaml
+++ b/roles/openshift_repos/tasks/main.yaml
@@ -14,33 +14,64 @@
yum:
pkg: libselinux-python
state: present
+ when: ansible_pkg_mgr == "yum"
+
+- name: Ensure libselinux-python is installed
+ dnf:
+ pkg: libselinux-python
+ state: present
+ when: ansible_pkg_mgr == "dnf"
- name: Create any additional repos that are defined
template:
src: yum_repo.j2
dest: /etc/yum.repos.d/openshift_additional.repo
when: openshift_additional_repos | length > 0
+ notify: refresh yum cache
- name: Remove the additional repos if no longer defined
file:
dest: /etc/yum.repos.d/openshift_additional.repo
state: absent
when: openshift_additional_repos | length == 0
+ notify: refresh yum cache
-- name: Remove any yum repo files for other deployment types
+- name: Remove any yum repo files for other deployment types (RHEL/CentOS)
file:
path: "/etc/yum.repos.d/{{ item | basename }}"
state: absent
with_fileglob:
- '*/repos/*'
- when: not (item | search("/files/" ~ openshift_deployment_type ~ "/repos"))
+ when: not (item | search("/files/" ~ openshift_deployment_type ~ "/repos")) and
+ (ansible_os_family == "RedHat" and ansible_distribution != "Fedora")
+ notify: refresh yum cache
+
+- name: Remove any yum repo files for other deployment types (Fedora)
+ file:
+ path: "/etc/yum.repos.d/{{ item | basename }}"
+ state: absent
+ with_fileglob:
+ - '*/repos/*'
+ when: not (item | search("/files/fedora-" ~ openshift_deployment_type ~ "/repos")) and
+ (ansible_distribution == "Fedora")
+ notify: refresh dnf cache
- name: Configure gpg keys if needed
copy: src={{ item }} dest=/etc/pki/rpm-gpg/
with_fileglob:
- "{{ openshift_deployment_type }}/gpg_keys/*"
+ notify: refresh yum cache
-- name: Configure yum repositories
+- name: Configure yum repositories (RHEL/CentOS)
copy: src={{ item }} dest=/etc/yum.repos.d/
with_fileglob:
- "{{ openshift_deployment_type }}/repos/*"
+ notify: refresh yum cache
+ when: (ansible_os_family == "RedHat" and ansible_distribution != "Fedora")
+
+- name: Configure yum repositories (Fedora)
+ copy: src={{ item }} dest=/etc/yum.repos.d/
+ with_fileglob:
+ - "fedora-{{ openshift_deployment_type }}/repos/*"
+ notify: refresh dnf cache
+ when: (ansible_distribution == "Fedora")
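For context, the "Create any additional repos" task above renders yum_repo.j2 into /etc/yum.repos.d/openshift_additional.repo from openshift_additional_repos. A minimal hypothetical value (field names follow the repo dict format used elsewhere in this patch; the baseurl is a placeholder):

# Hypothetical inventory/group_vars entry consumed by yum_repo.j2.
openshift_additional_repos:
  - id: ose-devel
    name: ose-devel
    baseurl: http://example.com/repos/RH7-RHAOS-3.1/x86_64/os/
    enabled: 1
    gpgcheck: 0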
diff --git a/roles/openshift_serviceaccounts/tasks/main.yml b/roles/openshift_serviceaccounts/tasks/main.yml
index d93a25a21..e558a83a2 100644
--- a/roles/openshift_serviceaccounts/tasks/main.yml
+++ b/roles/openshift_serviceaccounts/tasks/main.yml
@@ -13,7 +13,9 @@
changed_when: "'serviceaccounts \"{{ item }}\" already exists' not in _sa_result.stderr and _sa_result.rc == 0"
- name: Get current security context constraints
- shell: "{{ openshift.common.client_binary }} get scc privileged -o yaml > /tmp/scc.yaml"
+ shell: >
+ {{ openshift.common.client_binary }} get scc privileged -o yaml
+ --output-version=v1 > /tmp/scc.yaml
- name: Add security context constraint for {{ item }}
lineinfile:
@@ -23,4 +25,4 @@
with_items: accounts
- name: Apply new scc rules for service accounts
- command: "{{ openshift.common.client_binary }} update -f /tmp/scc.yaml"
+ command: "{{ openshift.common.client_binary }} update -f /tmp/scc.yaml --api-version=v1"
diff --git a/roles/openshift_storage_nfs_lvm/tasks/nfs.yml b/roles/openshift_storage_nfs_lvm/tasks/nfs.yml
index 65ae069df..bf23dfe98 100644
--- a/roles/openshift_storage_nfs_lvm/tasks/nfs.yml
+++ b/roles/openshift_storage_nfs_lvm/tasks/nfs.yml
@@ -1,6 +1,11 @@
---
- name: Install NFS server
yum: name=nfs-utils state=present
+ when: ansible_pkg_mgr == "yum"
+
+- name: Install NFS server
+ dnf: name=nfs-utils state=present
+ when: ansible_pkg_mgr == "dnf"
- name: Start rpcbind
service: name=rpcbind state=started enabled=yes
diff --git a/roles/os_env_extras/tasks/main.yaml b/roles/os_env_extras/tasks/main.yaml
index 96b12ad5b..29599559c 100644
--- a/roles/os_env_extras/tasks/main.yaml
+++ b/roles/os_env_extras/tasks/main.yaml
@@ -15,3 +15,10 @@
yum:
pkg: bash-completion
state: installed
+ when: ansible_pkg_mgr == "yum"
+
+- name: Bash Completion
+ dnf:
+ pkg: bash-completion
+ state: installed
+ when: ansible_pkg_mgr == "dnf"
diff --git a/roles/os_firewall/tasks/firewall/firewalld.yml b/roles/os_firewall/tasks/firewall/firewalld.yml
index 5089eb3e0..cf2a2c733 100644
--- a/roles/os_firewall/tasks/firewall/firewalld.yml
+++ b/roles/os_firewall/tasks/firewall/firewalld.yml
@@ -3,6 +3,14 @@
yum:
name: firewalld
state: present
+ when: ansible_pkg_mgr == "yum"
+ register: install_result
+
+- name: Install firewalld packages
+ dnf:
+ name: firewalld
+ state: present
+ when: ansible_pkg_mgr == "dnf"
register: install_result
- name: Check if iptables-services is installed
diff --git a/roles/os_firewall/tasks/firewall/iptables.yml b/roles/os_firewall/tasks/firewall/iptables.yml
index 9af9d8d29..36d51504c 100644
--- a/roles/os_firewall/tasks/firewall/iptables.yml
+++ b/roles/os_firewall/tasks/firewall/iptables.yml
@@ -6,6 +6,17 @@
with_items:
- iptables
- iptables-services
+ when: ansible_pkg_mgr == "yum"
+ register: install_result
+
+- name: Install iptables packages
+ dnf:
+ name: "{{ item }}"
+ state: present
+ with_items:
+ - iptables
+ - iptables-services
+ when: ansible_pkg_mgr == "dnf"
register: install_result
- name: Check if firewalld is installed
diff --git a/roles/os_update_latest/tasks/main.yml b/roles/os_update_latest/tasks/main.yml
index 4a2c3d47a..40eec8d35 100644
--- a/roles/os_update_latest/tasks/main.yml
+++ b/roles/os_update_latest/tasks/main.yml
@@ -1,3 +1,8 @@
---
- name: Update all packages
yum: name=* state=latest
+ when: ansible_pkg_mgr == "yum"
+
+- name: Update all packages
+ dnf: name=* state=latest
+ when: ansible_pkg_mgr == "dnf"
diff --git a/roles/os_zabbix/tasks/main.yml b/roles/os_zabbix/tasks/main.yml
index 59c89bb02..d0b307a3d 100644
--- a/roles/os_zabbix/tasks/main.yml
+++ b/roles/os_zabbix/tasks/main.yml
@@ -8,15 +8,35 @@
register: templates
- include_vars: template_heartbeat.yml
+ tags:
+ - heartbeat
- include_vars: template_os_linux.yml
+ tags:
+ - linux
- include_vars: template_docker.yml
+ tags:
+ - docker
- include_vars: template_openshift_master.yml
+ tags:
+ - openshift_master
- include_vars: template_openshift_node.yml
+ tags:
+ - openshift_node
- include_vars: template_ops_tools.yml
+ tags:
+ - ops_tools
- include_vars: template_app_zabbix_server.yml
+ tags:
+ - zabbix_server
- include_vars: template_app_zabbix_agent.yml
+ tags:
+ - zabbix_agent
- include_vars: template_performance_copilot.yml
+ tags:
+ - pcp
- include_vars: template_aws.yml
+ tags:
+ - aws
- name: Include Template Heartbeat
include: ../../lib_zabbix/tasks/create_template.yml
@@ -25,6 +45,8 @@
server: "{{ ozb_server }}"
user: "{{ ozb_user }}"
password: "{{ ozb_password }}"
+ tags:
+ - heartbeat
- name: Include Template os_linux
include: ../../lib_zabbix/tasks/create_template.yml
@@ -33,6 +55,8 @@
server: "{{ ozb_server }}"
user: "{{ ozb_user }}"
password: "{{ ozb_password }}"
+ tags:
+ - linux
- name: Include Template docker
include: ../../lib_zabbix/tasks/create_template.yml
@@ -41,6 +65,8 @@
server: "{{ ozb_server }}"
user: "{{ ozb_user }}"
password: "{{ ozb_password }}"
+ tags:
+ - docker
- name: Include Template Openshift Master
include: ../../lib_zabbix/tasks/create_template.yml
@@ -49,6 +75,8 @@
server: "{{ ozb_server }}"
user: "{{ ozb_user }}"
password: "{{ ozb_password }}"
+ tags:
+ - openshift_master
- name: Include Template Openshift Node
include: ../../lib_zabbix/tasks/create_template.yml
@@ -57,6 +85,8 @@
server: "{{ ozb_server }}"
user: "{{ ozb_user }}"
password: "{{ ozb_password }}"
+ tags:
+ - openshift_node
- name: Include Template Ops Tools
include: ../../lib_zabbix/tasks/create_template.yml
@@ -65,6 +95,8 @@
server: "{{ ozb_server }}"
user: "{{ ozb_user }}"
password: "{{ ozb_password }}"
+ tags:
+ - ops_tools
- name: Include Template App Zabbix Server
include: ../../lib_zabbix/tasks/create_template.yml
@@ -73,6 +105,8 @@
server: "{{ ozb_server }}"
user: "{{ ozb_user }}"
password: "{{ ozb_password }}"
+ tags:
+ - zabbix_server
- name: Include Template App Zabbix Agent
include: ../../lib_zabbix/tasks/create_template.yml
@@ -81,6 +115,8 @@
server: "{{ ozb_server }}"
user: "{{ ozb_user }}"
password: "{{ ozb_password }}"
+ tags:
+ - zabbix_agent
- name: Include Template Performance Copilot
include: ../../lib_zabbix/tasks/create_template.yml
@@ -89,6 +125,8 @@
server: "{{ ozb_server }}"
user: "{{ ozb_user }}"
password: "{{ ozb_password }}"
+ tags:
+ - pcp
- name: Include Template AWS
include: ../../lib_zabbix/tasks/create_template.yml
@@ -97,3 +135,5 @@
server: "{{ ozb_server }}"
user: "{{ ozb_user }}"
password: "{{ ozb_password }}"
+ tags:
+ - aws
diff --git a/roles/os_zabbix/vars/template_aws.yml b/roles/os_zabbix/vars/template_aws.yml
index 0ed682128..57832a3fe 100644
--- a/roles/os_zabbix/vars/template_aws.yml
+++ b/roles/os_zabbix/vars/template_aws.yml
@@ -4,7 +4,7 @@ g_template_aws:
zdiscoveryrules:
- name: disc.aws
key: disc.aws
- lifetime: 1
+ lifetime: 14
description: "Dynamically register AWS bucket info"
zitemprototypes:
diff --git a/roles/os_zabbix/vars/template_openshift_master.yml b/roles/os_zabbix/vars/template_openshift_master.yml
index 6defc4989..514d6fd24 100644
--- a/roles/os_zabbix/vars/template_openshift_master.yml
+++ b/roles/os_zabbix/vars/template_openshift_master.yml
@@ -7,12 +7,31 @@ g_template_openshift_master:
- Openshift Master
key: create_app
+ - key: openshift.master.registry.healthz
+ description: "Shows the health status of the cluster's docker registry"
+ type: int
+ applications:
+ - Openshift Master
+
- key: openshift.master.process.count
description: Shows number of master processes running
type: int
applications:
- Openshift Master
+ - key: openshift.master.api.ping
+ description: "Verify that the Openshift API is up"
+ type: int
+ applications:
+ - Openshift Master
+
+ - key: openshift.master.api.healthz
+ description: "Checks the healthz check of the master's api: https://master_host/healthz"
+ type: int
+ data_type: bool
+ applications:
+ - Openshift Master
+
- key: openshift.master.user.count
description: Shows number of users in a cluster
type: int
@@ -24,13 +43,61 @@ g_template_openshift_master:
type: int
applications:
- Openshift Master
-
- - key: openshift.project.counter
+
+ - key: openshift.master.pod.user.running.count
+ description: Shows number of user pods running (non-infrastructure pods)
+ type: int
+ applications:
+ - Openshift Master
+
+ - key: openshift.master.pod.total.count
+ description: Shows total number of pods (running and non running)
+ type: int
+ applications:
+ - Openshift Master
+
+ - key: openshift.master.node.count
+ description: Shows the total number of nodes found in the Openshift Cluster
+ type: int
+ applications:
+ - Openshift Master
+
+ - key: openshift.project.count
description: Shows number of projects on a cluster
type: int
applications:
- Openshift Master
+ - key: openshift.master.pv.total.count
+ description: Total number of Persistent Volumes in the Openshift Cluster
+ type: int
+ applications:
+ - Openshift Master
+
+ - key: openshift.master.pv.available.count
+ description: Total number of Available Persistent Volumes in the Openshift Cluster
+ type: int
+ applications:
+ - Openshift Master
+
+ - key: openshift.master.pv.released.count
+ description: Total number of Released Persistent Volumes in the Openshift Cluster
+ type: int
+ applications:
+ - Openshift Master
+
+ - key: openshift.master.pv.bound.count
+ description: Total number of Bound Persistent Volumes in the Openshift Cluster
+ type: int
+ applications:
+ - Openshift Master
+
+ - key: openshift.master.pv.failed.count
+ description: Total number of Failed Persistent Volumes in the Openshift Cluster
+ type: int
+ applications:
+ - Openshift Master
+
- key: openshift.master.etcd.create.success
description: Show number of successful create actions
type: int
@@ -103,12 +170,67 @@ g_template_openshift_master:
applications:
- Openshift Etcd
- ztriggers:
- - name: 'Application creation has failed on {HOST.NAME}'
- expression: '{Template Openshift Master:create_app.last(#1)}=1 and {Template Openshift Master:create_app.last(#2)}=1'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_create_app.asciidoc'
- priority: avg
+ - key: openshift.master.metric.ping
+ description: "This check verifies that the https://master/metrics check is alive and communicating properly."
+ type: int
+ applications:
+ - Openshift Master Metrics
+
+ - key: openshift.master.apiserver.latency.summary.pods.quantile.list.5
+ description: "Value from https://master/metrics. This is the time, in miliseconds, that 50% of the pod operations have taken to completed."
+ type: int
+ applications:
+ - Openshift Master Metrics
+
+ - key: openshift.master.apiserver.latency.summary.pods.quantile.list.9
+ description: "Value from https://master/metrics. This is the time, in miliseconds, that 90% of the pod operations have taken to completed."
+ type: int
+ applications:
+ - Openshift Master Metrics
+
+ - key: openshift.master.apiserver.latency.summary.pods.quantile.list.99
+ description: "Value from https://master/metrics. This is the time, in miliseconds, that 99% of the pod operations have taken to completed."
+ type: int
+ applications:
+ - Openshift Master Metrics
+
+ - key: openshift.master.apiserver.latency.summary.pods.quantile.watchlist.5
+ description: "Value from https://master/metrics. This is the time, in miliseconds, that 50% of the pod operations have taken to completed."
+ type: int
+ applications:
+ - Openshift Master Metrics
+
+ - key: openshift.master.apiserver.latency.summary.pods.quantile.watchlist.9
+ description: "Value from https://master/metrics. This is the time, in miliseconds, that 90% of the pod operations have taken to completed."
+ type: int
+ applications:
+ - Openshift Master Metrics
+
+ - key: openshift.master.apiserver.latency.summary.pods.quantile.watchlist.99
+ description: "Value from https://master/metrics. This is the time, in miliseconds, that 99% of the pod operations have taken to completed."
+ type: int
+ applications:
+ - Openshift Master Metrics
+ - key: openshift.master.scheduler.e2e.scheduling.latency.quantile.5
+ description: "Value from https://master/metrics. This is the time, in miliseconds, that 50% of the end to end scheduling operations have taken to completed."
+ type: int
+ applications:
+ - Openshift Master Metrics
+
+ - key: openshift.master.scheduler.e2e.scheduling.latency.quantile.9
+ description: "Value from https://master/metrics. This is the time, in miliseconds, that 90% of the end to end scheduling operations have taken to completed."
+ type: int
+ applications:
+ - Openshift Master Metrics
+
+ - key: openshift.master.scheduler.e2e.scheduling.latency.quantile.99
+ description: "Value from https://master/metrics. This is the time, in miliseconds, that 99% of the end to end scheduling operations have taken to completed."
+ type: int
+ applications:
+ - Openshift Master Metrics
+
+ ztriggers:
- name: 'Openshift Master process not running on {HOST.NAME}'
expression: '{Template Openshift Master:openshift.master.process.count.max(#3)}<1'
url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
@@ -119,22 +241,92 @@ g_template_openshift_master:
url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
priority: high
+ - name: 'Low number of etcd watchers on {HOST.NAME}'
+ expression: '{Template Openshift Master:openshift.master.etcd.watchers.last(#1)}<10 and {Template Openshift Master:openshift.master.etcd.watchers.last(#2)}<10'
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_etcd.asciidoc'
+ priority: avg
+
+ - name: 'Etcd ping failed on {HOST.NAME}'
+ expression: '{Template Openshift Master:openshift.master.etcd.ping.last(#1)}=0 and {Template Openshift Master:openshift.master.etcd.ping.last(#2)}=0'
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_etcd.asciidoc'
+ priority: high
+
- name: 'Number of users for Openshift Master on {HOST.NAME}'
expression: '{Template Openshift Master:openshift.master.user.count.last()}=0'
url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
priority: info
- name: 'There are no projects running on {HOST.NAME}'
- expression: '{Template Openshift Master:openshift.project.counter.last()}=0'
+ expression: '{Template Openshift Master:openshift.project.count.last()}=0'
url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
priority: info
- - name: 'Low number of etcd watchers on {HOST.NAME}'
- expression: '{Template Openshift Master:openshift.master.etcd.watchers.last(#1)}<10 and {Template Openshift Master:openshift.master.etcd.watchers.last(#2)}<10'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_etcd.asciidoc'
+ # Put triggers that depend on other triggers here (deps must be created first)
+ - name: 'Application creation has failed on {HOST.NAME}'
+ expression: '{Template Openshift Master:create_app.last(#1)}=1 and {Template Openshift Master:create_app.last(#2)}=1'
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_create_app.asciidoc'
+ dependencies:
+ - 'Openshift Master process not running on {HOST.NAME}'
priority: avg
- - name: 'Etcd ping failed on {HOST.NAME}'
- expression: '{Template Openshift Master:openshift.master.etcd.ping.last(#1)}=0 and {Template Openshift Master:openshift.master.etcd.ping.last(#2)}=0'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_etcd.asciidoc'
+ - name: 'Openshift Master API health check is failing on {HOST.NAME}'
+ expression: '{Template Openshift Master:openshift.master.api.healthz.max(#3)}<1'
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
+ dependencies:
+ - 'Openshift Master process not running on {HOST.NAME}'
+ priority: high
+
+ - name: 'Openshift Master API PING check is failing on {HOST.NAME}'
+ expression: '{Template Openshift Master:openshift.master.api.ping.max(#3)}<1'
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
+ dependencies:
+ - 'Openshift Master process not running on {HOST.NAME}'
+ priority: high
+
+ - name: 'Openshift Master metric PING check is failing on {HOST.NAME}'
+ expression: '{Template Openshift Master:openshift.master.metric.ping.max(#3)}<1'
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
+ dependencies:
+ - 'Openshift Master process not running on {HOST.NAME}'
+ priority: avg
+
+ - name: 'Docker Registry check failed on {HOST.NAME}'
+ expression: '{Template Openshift Master:openshift.master.registry.healthz.max(#2)}<1'
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/openshift_master.asciidoc'
+ dependencies:
+ - 'Openshift Master process not running on {HOST.NAME}'
priority: high
+
+ zgraphs:
+ - name: Openshift Master API Server Latency Pods LIST Quantiles
+ width: 900
+ height: 200
+ graph_items:
+ - item_name: openshift.master.apiserver.latency.summary.pods.quantile.list.5
+ color: red
+ - item_name: openshift.master.apiserver.latency.summary.pods.quantile.list.9
+ color: blue
+ - item_name: openshift.master.apiserver.latency.summary.pods.quantile.list.99
+ color: orange
+
+ - name: Openshift Master API Server Latency Pods WATCHLIST Quantiles
+ width: 900
+ height: 200
+ graph_items:
+ - item_name: openshift.master.apiserver.latency.summary.pods.quantile.watchlist.5
+ color: red
+ - item_name: openshift.master.apiserver.latency.summary.pods.quantile.watchlist.9
+ color: blue
+ - item_name: openshift.master.apiserver.latency.summary.pods.quantile.watchlist.99
+ color: orange
+
+ - name: Openshift Master Scheduler End to End Latency Quantiles
+ width: 900
+ height: 200
+ graph_items:
+ - item_name: openshift.master.scheduler.e2e.scheduling.latency.quantile.5
+ color: red
+ - item_name: openshift.master.scheduler.e2e.scheduling.latency.quantile.9
+ color: blue
+ - item_name: openshift.master.scheduler.e2e.scheduling.latency.quantile.99
+ color: orange
diff --git a/roles/os_zabbix/vars/template_os_linux.yml b/roles/os_zabbix/vars/template_os_linux.yml
index 04665be62..c6e557f12 100644
--- a/roles/os_zabbix/vars/template_os_linux.yml
+++ b/roles/os_zabbix/vars/template_os_linux.yml
@@ -258,26 +258,34 @@ g_template_os_linux:
- Network
ztriggerprototypes:
- - name: 'Filesystem: {#OSO_FILESYS} has less than 15% free disk space on {HOST.NAME}'
- expression: '{Template OS Linux:disc.filesys.full[{#OSO_FILESYS}].last()}>85'
- url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_filesys_full.asciidoc'
- priority: warn
-
- name: 'Filesystem: {#OSO_FILESYS} has less than 10% free disk space on {HOST.NAME}'
expression: '{Template OS Linux:disc.filesys.full[{#OSO_FILESYS}].last()}>90'
url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_filesys_full.asciidoc'
priority: high
- - name: 'Filesystem: {#OSO_FILESYS} has less than 10% free inodes on {HOST.NAME}'
- expression: '{Template OS Linux:disc.filesys.inodes.pused[{#OSO_FILESYS}].last()}>90'
+ # This has a dependency on the previous trigger
+ # Trigger Prototypes do not work in 2.4. They will work in Zabbix 3.0
+ - name: 'Filesystem: {#OSO_FILESYS} has less than 15% free disk space on {HOST.NAME}'
+ expression: '{Template OS Linux:disc.filesys.full[{#OSO_FILESYS}].last()}>85'
url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_filesys_full.asciidoc'
priority: warn
+ dependencies:
+ - 'Filesystem: {#OSO_FILESYS} has less than 10% free disk space on {HOST.NAME}'
- name: 'Filesystem: {#OSO_FILESYS} has less than 5% free inodes on {HOST.NAME}'
expression: '{Template OS Linux:disc.filesys.inodes.pused[{#OSO_FILESYS}].last()}>95'
url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_filesys_full.asciidoc'
priority: high
+ # This has a dependency on the previous trigger
+ # Trigger Prototypes do not work in 2.4. They will work in Zabbix 3.0
+ - name: 'Filesystem: {#OSO_FILESYS} has less than 10% free inodes on {HOST.NAME}'
+ expression: '{Template OS Linux:disc.filesys.inodes.pused[{#OSO_FILESYS}].last()}>90'
+ url: 'https://github.com/openshift/ops-sop/blob/master/V3/Alerts/check_filesys_full.asciidoc'
+ priority: warn
+ dependencies:
+ - 'Filesystem: {#OSO_FILESYS} has less than 5% free inodes on {HOST.NAME}'
+
ztriggers:
- name: 'Too many TOTAL processes on {HOST.NAME}'
expression: '{Template OS Linux:proc.nprocs.last()}>5000'
diff --git a/roles/oso_host_monitoring/README.md b/roles/oso_host_monitoring/README.md
new file mode 100644
index 000000000..f1fa05adb
--- /dev/null
+++ b/roles/oso_host_monitoring/README.md
@@ -0,0 +1,50 @@
+oso_host_monitoring
+===================
+
+Applies local host monitoring container(s).
+
+Requirements
+------------
+
+None.
+
+Role Variables
+--------------
+
+- osohm_zagg_web_url: where to contact the monitoring service
+- osohm_host_monitoring: name of the host monitoring container
+- osohm_zagg_client: name of the container with the zabbix client
+- osohm_docker_registry_url: docker registry containing the above containers
+- osohm_default_zagg_server_user: username for the zabbix server
+- osohm_default_zagg_server_password: password for the zabbix server
+
+Dependencies
+------------
+
+None.
+
+Example Playbook
+----------------
+
+An example of how to use this role, with variables passed in as parameters:
+
+ - hosts: servers
+ roles:
+ - oso_host_monitoring
+ vars:
+ osohm_zagg_web_url: "https://..."
+ osohm_host_monitoring: "oso-rhel7-host-monitoring"
+ osohm_zagg_client: "oso-rhel7-zagg-client"
+ osohm_docker_registry_url: "docker-registry.example.com/mon/"
+ osohm_default_zagg_server_user: "zagg-client"
+ osohm_default_zagg_server_password: "secret"
+
+License
+-------
+
+ASL 2.0
+
+Author Information
+------------------
+
+OpenShift operations, Red Hat, Inc
diff --git a/roles/oso_host_monitoring/defaults/main.yml b/roles/oso_host_monitoring/defaults/main.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/roles/oso_host_monitoring/defaults/main.yml
@@ -0,0 +1 @@
+---
diff --git a/roles/oso_host_monitoring/handlers/main.yml b/roles/oso_host_monitoring/handlers/main.yml
new file mode 100644
index 000000000..7863ad15b
--- /dev/null
+++ b/roles/oso_host_monitoring/handlers/main.yml
@@ -0,0 +1,12 @@
+---
+- name: "Restart the {{ osohm_host_monitoring }} service"
+ service:
+ name: "{{ osohm_host_monitoring }}"
+ state: restarted
+ enabled: yes
+
+- name: "Restart the {{ osohm_zagg_client }} service"
+ service:
+ name: "{{ osohm_zagg_client }}"
+ state: restarted
+ enabled: yes
diff --git a/roles/oso_host_monitoring/meta/main.yml b/roles/oso_host_monitoring/meta/main.yml
new file mode 100644
index 000000000..cce30c2db
--- /dev/null
+++ b/roles/oso_host_monitoring/meta/main.yml
@@ -0,0 +1,8 @@
+---
+galaxy_info:
+ author: OpenShift
+ description: apply monitoring container(s).
+ company: Red Hat, Inc
+ license: ASL 2.0
+ min_ansible_version: 1.2
+dependencies: []
diff --git a/roles/oso_host_monitoring/tasks/main.yml b/roles/oso_host_monitoring/tasks/main.yml
new file mode 100644
index 000000000..6ddfa3dcb
--- /dev/null
+++ b/roles/oso_host_monitoring/tasks/main.yml
@@ -0,0 +1,65 @@
+---
+- fail:
+ msg: "This playbook requires {{item}} to be set."
+ when: "{{ item }} is not defined or {{ item }} == ''"
+ with_items:
+ - osohm_zagg_web_url
+ - osohm_host_monitoring
+ - osohm_zagg_client
+ - osohm_docker_registry_url
+ - osohm_default_zagg_server_user
+ - osohm_default_zagg_server_password
+
+- name: create /etc/docker/ops
+ file:
+ path: /etc/docker/ops
+ state: directory
+ mode: 0770
+ group: root
+ owner: root
+
+- name: Copy dockercfg to /etc/docker/ops
+ template:
+ src: docker-registry.ops.cfg.j2
+ dest: /etc/docker/ops/.dockercfg
+ owner: root
+ group: root
+ mode: 0600
+
+- name: "Copy {{ osohm_host_monitoring }} systemd file"
+ template:
+ src: "{{ osohm_host_monitoring }}.service.j2"
+ dest: "/etc/systemd/system/{{ osohm_host_monitoring }}.service"
+ owner: root
+ group: root
+ mode: 0644
+ notify:
+ - "Restart the {{ osohm_host_monitoring }} service"
+ register: systemd_host_monitoring
+
+- name: "Copy {{ osohm_zagg_client }} systemd file"
+ template:
+ src: "{{ osohm_zagg_client }}.service.j2"
+ dest: "/etc/systemd/system/{{ osohm_zagg_client }}.service"
+ owner: root
+ group: root
+ mode: 0644
+ notify:
+ - "Restart the {{ osohm_zagg_client }} service"
+ register: zagg_systemd
+
+- name: reload systemd
+ command: /usr/bin/systemctl --system daemon-reload
+ when: systemd_host_monitoring | changed or zagg_systemd | changed
+
+- name: "Start the {{ osohm_host_monitoring }} service"
+ service:
+ name: "{{ osohm_host_monitoring }}"
+ state: started
+ enabled: yes
+
+- name: "Start the {{ osohm_zagg_client }} service"
+ service:
+ name: "{{ osohm_zagg_client }}"
+ state: started
+ enabled: yes
diff --git a/roles/oso_host_monitoring/templates/docker-registry.ops.cfg.j2 b/roles/oso_host_monitoring/templates/docker-registry.ops.cfg.j2
new file mode 100644
index 000000000..9e49da469
--- /dev/null
+++ b/roles/oso_host_monitoring/templates/docker-registry.ops.cfg.j2
@@ -0,0 +1 @@
+{"{{ osohm_docker_registry_ops_url }}":{"auth":"{{ osohm_docker_registry_ops_key }}","email":"{{ osohm_docker_registry_ops_email }}"}}
diff --git a/roles/oso_host_monitoring/templates/oso-f22-host-monitoring.service.j2 b/roles/oso_host_monitoring/templates/oso-f22-host-monitoring.service.j2
new file mode 100644
index 000000000..d18ad90fe
--- /dev/null
+++ b/roles/oso_host_monitoring/templates/oso-f22-host-monitoring.service.j2
@@ -0,0 +1,43 @@
+# This is a systemd file to run this docker container under systemd.
+# To make this work:
+# * pull the image (probably from ops docker registry)
+# * place this file in /etc/systemd/system without the .systemd extension
+# * run the commands:
+# systemctl daemon-reload
+# systemctl enable pcp-docker
+# systemctl start pcp-docker
+#
+#
+[Unit]
+Description=PCP Collector Container
+Requires=docker.service
+After=docker.service
+
+
+[Service]
+Type=simple
+TimeoutStartSec=5m
+Environment=HOME=/etc/docker/ops
+#Slice=container-small.slice
+
+# systemd syntax '=-' ignores errors from return codes.
+ExecStartPre=-/usr/bin/docker kill "{{ osohm_host_monitoring }}"
+ExecStartPre=-/usr/bin/docker rm "{{ osohm_host_monitoring }}"
+ExecStartPre=-/usr/bin/docker pull "{{ osohm_docker_registry_url }}{{ osohm_host_monitoring }}"
+
+
+ExecStart=/usr/bin/docker run --rm --name="{{ osohm_host_monitoring }}" \
+ --privileged --net=host --pid=host --ipc=host \
+ -v /sys:/sys:ro -v /etc/localtime:/etc/localtime:ro \
+ -v /var/lib/docker:/var/lib/docker:ro -v /run:/run \
+ -v /var/log:/var/log \
+ {{ osohm_docker_registry_url }}{{ osohm_host_monitoring }}
+
+ExecReload=-/usr/bin/docker stop "{{ osohm_host_monitoring }}"
+ExecReload=-/usr/bin/docker rm "{{ osohm_host_monitoring }}"
+ExecStop=-/usr/bin/docker stop "{{ osohm_host_monitoring }}"
+Restart=always
+RestartSec=30
+
+[Install]
+WantedBy=default.target
diff --git a/roles/oso_host_monitoring/templates/oso-rhel7-zagg-client.service.j2 b/roles/oso_host_monitoring/templates/oso-rhel7-zagg-client.service.j2
new file mode 100644
index 000000000..978e40b88
--- /dev/null
+++ b/roles/oso_host_monitoring/templates/oso-rhel7-zagg-client.service.j2
@@ -0,0 +1,62 @@
+# This is a systemd file to run this docker container under systemd.
+# To make this work:
+# * pull the image (probably from ops docker registry)
+# * place this file in /etc/systemd/system without the .systemd extension
+# * run the commands:
+# systemctl daemon-reload
+# systemctl enable zagg-client-docker
+# systemctl start zagg-client-docker
+#
+#
+[Unit]
+Description=Zagg Client Container
+Requires=docker.service
+After=docker.service
+
+
+[Service]
+Type=simple
+TimeoutStartSec=5m
+Environment=HOME=/etc/docker/ops
+#Slice=container-small.slice
+
+# systemd syntax '=-' ignores errors from return codes.
+ExecStartPre=-/usr/bin/docker kill "{{ osohm_zagg_client }}"
+ExecStartPre=-/usr/bin/docker rm "{{ osohm_zagg_client }}"
+ExecStartPre=-/usr/bin/docker pull "{{ osohm_docker_registry_url }}{{ osohm_zagg_client }}"
+
+
+ExecStart=/usr/bin/docker run --name {{ osohm_zagg_client }} \
+ --privileged \
+ --pid=host \
+ --net=host \
+ -e ZAGG_URL={{ osohm_zagg_web_url }} \
+ -e ZAGG_USER={{ osohm_default_zagg_server_user }} \
+ -e ZAGG_PASSWORD={{ osohm_default_zagg_server_password }} \
+ -e ZAGG_CLIENT_HOSTNAME={{ ec2_tag_Name }} \
+ -e ZAGG_SSL_VERIFY={{ osohm_zagg_verify_ssl }} \
+ -e OSO_CLUSTER_GROUP={{ cluster_group }} \
+ -e OSO_CLUSTER_ID={{ oo_clusterid }} \
+ -e OSO_HOST_TYPE={{ hostvars[inventory_hostname]['ec2_tag_host-type'] }} \
+ -e OSO_SUB_HOST_TYPE={{ hostvars[inventory_hostname]['ec2_tag_sub-host-type'] }} \
+ -v /etc/localtime:/etc/localtime \
+ -v /run/pcp:/run/pcp \
+ -v /var/run/docker.sock:/var/run/docker.sock \
+ -v /var/run/openvswitch/db.sock:/var/run/openvswitch/db.sock \
+{% if hostvars[inventory_hostname]['ec2_tag_host-type'] == 'master' %}
+ -v /etc/openshift/master/admin.kubeconfig:/etc/openshift/master/admin.kubeconfig \
+ -v /etc/openshift/master/master.etcd-client.crt:/etc/openshift/master/master.etcd-client.crt \
+ -v /etc/openshift/master/master.etcd-client.key:/etc/openshift/master/master.etcd-client.key \
+ -v /etc/openshift/master/master-config.yaml:/etc/openshift/master/master-config.yaml \
+{% endif %}
+ {{ osohm_docker_registry_url }}{{ osohm_zagg_client }}
+
+
+ExecReload=-/usr/bin/docker stop "{{ osohm_zagg_client }}"
+ExecReload=-/usr/bin/docker rm "{{ osohm_zagg_client }}"
+ExecStop=-/usr/bin/docker stop "{{ osohm_zagg_client }}"
+Restart=always
+RestartSec=30
+
+[Install]
+WantedBy=default.target
diff --git a/roles/oso_host_monitoring/vars/main.yml b/roles/oso_host_monitoring/vars/main.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/roles/oso_host_monitoring/vars/main.yml
@@ -0,0 +1 @@
+---
diff --git a/roles/rhel_subscribe/tasks/main.yml b/roles/rhel_subscribe/tasks/main.yml
index 8fb2fc042..30c0920a1 100644
--- a/roles/rhel_subscribe/tasks/main.yml
+++ b/roles/rhel_subscribe/tasks/main.yml
@@ -6,19 +6,26 @@
- set_fact:
rhel_subscription_user: "{{ lookup('oo_option', 'rhel_subscription_user') | default(rhsub_user, True) | default(omit, True) }}"
rhel_subscription_pass: "{{ lookup('oo_option', 'rhel_subscription_pass') | default(rhsub_pass, True) | default(omit, True) }}"
+ rhel_subscription_server: "{{ lookup('oo_option', 'rhel_subscription_server') | default(rhsub_server) }}"
- fail:
msg: "This role is only supported for Red Hat hosts"
when: ansible_distribution != 'RedHat'
- fail:
- msg: Either rsub_user or the rhel_subscription_user env variable are required for this role.
+ msg: Either rhsub_user or the rhel_subscription_user env variable are required for this role.
when: rhel_subscription_user is not defined
- fail:
- msg: Either rsub_pass or the rhel_subscription_pass env variable are required for this role.
+ msg: Either rhsub_pass or the rhel_subscription_pass env variable are required for this role.
when: rhel_subscription_pass is not defined
+- name: Satellite preparation
+ command: "rpm -Uvh http://{{ rhel_subscription_server }}/pub/katello-ca-consumer-latest.noarch.rpm"
+ args:
+ creates: /etc/rhsm/ca/katello-server-ca.pem
+ when: rhel_subscription_server is defined and rhel_subscription_server
+
- name: RedHat subscriptions
redhat_subscription:
username: "{{ rhel_subscription_user }}"
diff --git a/roles/tito/README.md b/roles/tito/README.md
new file mode 100644
index 000000000..c4e2856dc
--- /dev/null
+++ b/roles/tito/README.md
@@ -0,0 +1,38 @@
+tito
+====
+
+This role manages Tito.
+
+https://github.com/dgoodwin/tito
+
+Requirements
+------------
+
+None
+
+Role Variables
+--------------
+
+None
+
+Dependencies
+------------
+
+None
+
+Example Playbook
+----------------
+
+ - hosts: servers
+ roles:
+ - role: tito
+
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
+
+Thomas Wiest
diff --git a/roles/tito/defaults/main.yml b/roles/tito/defaults/main.yml
new file mode 100644
index 000000000..dd7cd269e
--- /dev/null
+++ b/roles/tito/defaults/main.yml
@@ -0,0 +1,2 @@
+---
+# defaults file for tito
diff --git a/roles/tito/handlers/main.yml b/roles/tito/handlers/main.yml
new file mode 100644
index 000000000..e9ce609d5
--- /dev/null
+++ b/roles/tito/handlers/main.yml
@@ -0,0 +1,2 @@
+---
+# handlers file for tito
diff --git a/roles/tito/meta/main.yml b/roles/tito/meta/main.yml
new file mode 100644
index 000000000..fb121c08e
--- /dev/null
+++ b/roles/tito/meta/main.yml
@@ -0,0 +1,14 @@
+---
+galaxy_info:
+ author: Thomas Wiest
+ description: Manages Tito
+ company: Red Hat
+ license: Apache License, Version 2.0
+ min_ansible_version: 1.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - packaging
+dependencies: []
diff --git a/roles/tito/tasks/main.yml b/roles/tito/tasks/main.yml
new file mode 100644
index 000000000..f7b4ef363
--- /dev/null
+++ b/roles/tito/tasks/main.yml
@@ -0,0 +1,4 @@
+---
+- yum:
+ name: tito
+ state: present
diff --git a/roles/tito/vars/main.yml b/roles/tito/vars/main.yml
new file mode 100644
index 000000000..8a1aafc41
--- /dev/null
+++ b/roles/tito/vars/main.yml
@@ -0,0 +1,2 @@
+---
+# vars file for tito
diff --git a/roles/yum_repos/README.md b/roles/yum_repos/README.md
index 51ecd5d34..908ab4972 100644
--- a/roles/yum_repos/README.md
+++ b/roles/yum_repos/README.md
@@ -6,7 +6,7 @@ This role allows easy deployment of yum repository config files.
Requirements
------------
-Yum
+Yum or dnf
Role Variables
--------------
diff --git a/utils/site_assets/oo-install-bootstrap.sh b/utils/site_assets/oo-install-bootstrap.sh
index e1b2cec90..3847c029a 100755
--- a/utils/site_assets/oo-install-bootstrap.sh
+++ b/utils/site_assets/oo-install-bootstrap.sh
@@ -9,6 +9,13 @@ cmdlnargs="$@"
: ${OO_INSTALL_LOG:=${TMPDIR}/INSTALLPKGNAME.log}
[[ $TMPDIR != */ ]] && TMPDIR="${TMPDIR}/"
+if rpm -q dnf;
+then
+ PKG_MGR="dnf"
+else
+ PKG_MGR="yum"
+fi
+
if [ $OO_INSTALL_CONTEXT != 'origin_vm' ]
then
clear
@@ -18,7 +25,7 @@ if [ -e /etc/redhat-release ]
then
for i in python python-virtualenv openssh-clients gcc
do
- rpm -q $i >/dev/null 2>&1 || { echo >&2 "Missing installation dependency detected. Please run \"yum install ${i}\"."; exit 1; }
+ rpm -q $i >/dev/null 2>&1 || { echo >&2 "Missing installation dependency detected. Please run \"${PKG_MGR} install ${i}\"."; exit 1; }
done
fi
for i in python virtualenv ssh gcc
diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py
index 3c3f45c3b..8cabe5431 100644
--- a/utils/src/ooinstall/cli_installer.py
+++ b/utils/src/ooinstall/cli_installer.py
@@ -8,6 +8,7 @@ import re
import sys
from ooinstall import openshift_ansible
from ooinstall import OOConfig
+from ooinstall.oo_config import OOConfigInvalidHostError
from ooinstall.oo_config import Host
from ooinstall.variants import find_variant, get_variant_version_combos
@@ -71,7 +72,7 @@ def delete_hosts(hosts):
click.echo("\"{}\" doesn't coorespond to any valid input.".format(del_idx))
return hosts, None
-def collect_hosts():
+def collect_hosts(version=None, masters_set=False, print_summary=True):
"""
Collect host information from user. This will later be filled in using
ansible.
@@ -79,19 +80,31 @@ def collect_hosts():
Returns: a list of host information collected from the user
"""
click.clear()
- click.echo('***Host Configuration***')
+ click.echo('*** Host Configuration ***')
message = """
-The OpenShift Master serves the API and web console. It also coordinates the
-jobs that have to run across the environment. It can even run the datastore.
-For wizard based installations the database will be embedded. It's possible to
-change this later using etcd from Red Hat Enterprise Linux 7.
+You must now specify the hosts that will compose your OpenShift cluster.
+
+Please enter an IP or hostname to connect to for each system in the cluster.
+You will then be prompted to identify what role you would like this system to
+serve in the cluster.
+
+OpenShift Masters serve the API and web console and coordinate the jobs to run
+across the environment. If desired, you can specify multiple Master systems for
+an HA deployment, in which case you will be prompted to identify a *separate*
+system to act as the load balancer for your cluster after all Masters and Nodes
+are defined.
+
+If only one Master is specified, an etcd instance embedded within the OpenShift
+Master service will be used as the datastore. This can later be replaced with a
+separate etcd instance if desired. If multiple Masters are specified, a
+separate etcd cluster will be configured with each Master serving as a member.
Any Masters configured as part of this installation process will also be
configured as Nodes. This is so that the Master will be able to proxy to Pods
-from the API. By default this Node will be unscheduleable but this can be changed
+from the API. By default this Node will be unschedulable but this can be changed
after installation with 'oadm manage-node'.
-The OpenShift Node provides the runtime environments for containers. It will
+OpenShift Nodes provide the runtime environments for containers. They will
host the required services to be managed by the Master.
http://docs.openshift.com/enterprise/latest/architecture/infrastructure_components/kubernetes_infrastructure.html#master
@@ -101,15 +114,19 @@ http://docs.openshift.com/enterprise/latest/architecture/infrastructure_componen
hosts = []
more_hosts = True
+ num_masters = 0
while more_hosts:
host_props = {}
- hostname_or_ip = click.prompt('Enter hostname or IP address:',
- default='',
- value_proc=validate_prompt_hostname)
+ host_props['connect_to'] = click.prompt('Enter hostname or IP address',
+ value_proc=validate_prompt_hostname)
- host_props['connect_to'] = hostname_or_ip
+ if not masters_set:
+ if click.confirm('Will this host be an OpenShift Master?'):
+ host_props['master'] = True
+ num_masters += 1
- host_props['master'] = click.confirm('Will this host be an OpenShift Master?')
+ if version == '3.0':
+ masters_set = True
host_props['node'] = True
#TODO: Reenable this option once container installs are out of tech preview
@@ -126,9 +143,142 @@ http://docs.openshift.com/enterprise/latest/architecture/infrastructure_componen
hosts.append(host)
- more_hosts = click.confirm('Do you want to add additional hosts?')
+ if print_summary:
+ print_installation_summary(hosts)
+
+ # A single master is enough for an all-in-one deployment, so we can ask
+ # whether to proceed. With exactly two masters we keep prompting for more
+ # hosts, since an HA deployment needs at least three.
+ if masters_set or num_masters != 2:
+ more_hosts = click.confirm('Do you want to add additional hosts?')
+
+ if num_masters >= 3:
+ collect_master_lb(hosts)
+
return hosts
+
+def print_installation_summary(hosts):
+ """
+ Displays a summary of all hosts configured thus far, and what role each
+ will play.
+
+ Shows total nodes/masters, hints for performing/modifying the deployment
+ with additional setup, warnings for invalid or sub-optimal configurations.
+ """
+ click.clear()
+ click.echo('*** Installation Summary ***\n')
+ click.echo('Hosts:')
+ for host in hosts:
+ print_host_summary(hosts, host)
+
+ masters = [host for host in hosts if host.master]
+ nodes = [host for host in hosts if host.node]
+ dedicated_nodes = [host for host in hosts if host.node and not host.master]
+ click.echo('')
+ click.echo('Total OpenShift Masters: %s' % len(masters))
+ click.echo('Total OpenShift Nodes: %s' % len(nodes))
+
+ if len(masters) == 1:
+ ha_hint_message = """
+NOTE: Add a total of 3 or more Masters to perform an HA installation."""
+ click.echo(ha_hint_message)
+ elif len(masters) == 2:
+ min_masters_message = """
+WARNING: A minimum of 3 masters are required to perform an HA installation.
+Please add one more to proceed."""
+ click.echo(min_masters_message)
+ elif len(masters) >= 3:
+ ha_message = """
+NOTE: Multiple Masters specified, this will be an HA deployment with a separate
+etcd cluster. You will be prompted to provide the FQDN of a load balancer once
+finished entering hosts."""
+ click.echo(ha_message)
+
+ dedicated_nodes_message = """
+WARNING: Dedicated Nodes are recommended for an HA deployment. If no dedicated
+Nodes are specified, each configured Master will be marked as a schedulable
+Node."""
+
+ min_ha_nodes_message = """
+WARNING: A minimum of 3 dedicated Nodes are recommended for an HA
+deployment."""
+ if len(dedicated_nodes) == 0:
+ click.echo(dedicated_nodes_message)
+ elif len(dedicated_nodes) < 3:
+ click.echo(min_ha_nodes_message)
+
+ click.echo('')
+
+
+def print_host_summary(all_hosts, host):
+ click.echo("- %s" % host.connect_to)
+ if host.master:
+ click.echo(" - OpenShift Master")
+ if host.node:
+ if host.is_dedicated_node():
+ click.echo(" - OpenShift Node (Dedicated)")
+ elif host.is_schedulable_node(all_hosts):
+ click.echo(" - OpenShift Node")
+ else:
+ click.echo(" - OpenShift Node (Unscheduled)")
+ if host.master_lb:
+ if host.preconfigured:
+ click.echo(" - Load Balancer (Preconfigured)")
+ else:
+ click.echo(" - Load Balancer (HAProxy)")
+ if host.master:
+ if host.is_etcd_member(all_hosts):
+ click.echo(" - Etcd Member")
+ else:
+ click.echo(" - Etcd (Embedded)")
+
+
+def collect_master_lb(hosts):
+ """
+ Get a valid load balancer from the user and append it to the list of
+ hosts.
+
+ Ensure user does not specify a system already used as a master/node as
+ this is an invalid configuration.
+ """
+ message = """
+Setting up High Availability Masters requires a load balancing solution.
+Please provide the FQDN of a host that will be configured as a proxy. This
+can be either an existing load balancer configured to balance all masters on
+port 8443 or a new host that will have HAProxy installed on it.
+
+If the host provided is not yet configured, a reference haproxy load
+balancer will be installed. It's important to note that while the rest of the
+environment will be fault tolerant, this reference load balancer will not be.
+It can be replaced post-installation with a load balancer with the same
+hostname.
+"""
+ click.echo(message)
+ host_props = {}
+
+ # Using an embedded function here so we have access to the hosts list:
+ def validate_prompt_lb(hostname):
+ # Run the standard hostname check first:
+ hostname = validate_prompt_hostname(hostname)
+
+ # Make sure this host wasn't already specified:
+ for host in hosts:
+ if host.connect_to == hostname and (host.master or host.node):
+ raise click.BadParameter('Cannot re-use "%s" as a load balancer, '
+ 'please specify a separate host' % hostname)
+ return hostname
+
+ host_props['connect_to'] = click.prompt('Enter hostname or IP address',
+ value_proc=validate_prompt_lb)
+ install_haproxy = click.confirm('Should the reference haproxy load balancer be installed on this host?')
+ host_props['preconfigured'] = not install_haproxy
+ host_props['master'] = False
+ host_props['node'] = False
+ host_props['master_lb'] = True
+ master_lb = Host(**host_props)
+ hosts.append(master_lb)
+
def confirm_hosts_facts(oo_cfg, callback_facts):
hosts = oo_cfg.hosts
click.clear()
@@ -166,6 +316,8 @@ Notes:
default_facts_lines = []
default_facts = {}
for h in hosts:
+ if h.preconfigured:
+ continue
default_facts[h.connect_to] = {}
h.ip = callback_facts[h.connect_to]["common"]["ip"]
h.public_ip = callback_facts[h.connect_to]["common"]["public_ip"]
@@ -196,7 +348,50 @@ Edit %s with the desired values and run `atomic-openshift-installer --unattended
sys.exit(0)
return default_facts
-def get_variant_and_version():
+
+
+def check_hosts_config(oo_cfg, unattended):
+ click.clear()
+ masters = [host for host in oo_cfg.hosts if host.master]
+
+ if len(masters) == 2:
+ click.echo("A minimum of 3 Masters are required for HA deployments.")
+ sys.exit(1)
+
+ if len(masters) > 1:
+ master_lb = [host for host in oo_cfg.hosts if host.master_lb]
+ if len(master_lb) > 1:
+ click.echo('ERROR: More than one Master load balancer specified. Only one is allowed.')
+ sys.exit(1)
+ elif len(master_lb) == 1:
+ if master_lb[0].master or master_lb[0].node:
+ click.echo('ERROR: The Master load balancer is configured as a master or node. Please correct this.')
+ sys.exit(1)
+ else:
+ message = """
+ERROR: No master load balancer specified in config. You must provide the FQDN
+of a load balancer to balance the API (port 8443) on all Master hosts.
+
+https://docs.openshift.org/latest/install_config/install/advanced_install.html#multiple-masters
+"""
+ click.echo(message)
+ sys.exit(1)
+
+ dedicated_nodes = [host for host in oo_cfg.hosts if host.node and not host.master]
+ if len(dedicated_nodes) == 0:
+ message = """
+WARNING: No dedicated Nodes specified. By default, colocated Masters have
+their Nodes set to unschedulable. If you proceed, all nodes will be labelled
+as schedulable.
+"""
+ if unattended:
+ click.echo(message)
+ else:
+ confirm_continue(message)
+
+ return
+
+def get_variant_and_version(multi_master=False):
message = "\nWhich variant would you like to install?\n\n"
i = 1
@@ -205,15 +400,19 @@ def get_variant_and_version():
message = "%s\n(%s) %s %s" % (message, i, variant.description,
version.name)
i = i + 1
+ message = "%s\n" % message
click.echo(message)
+ if multi_master:
+ click.echo('NOTE: 3.0 installations are not supported with multiple Masters.')
response = click.prompt("Choose a variant from above: ", default=1)
product, version = combos[response - 1]
return product, version
def confirm_continue(message):
- click.echo(message)
+ if message:
+ click.echo(message)
click.confirm("Are you ready to continue?", default=False, abort=True)
return
@@ -288,27 +487,27 @@ https://docs.openshift.com/enterprise/latest/admin_guide/install/prerequisites.h
oo_cfg.settings['ansible_ssh_user'] = get_ansible_ssh_user()
click.clear()
- if not oo_cfg.hosts:
- oo_cfg.hosts = collect_hosts()
- click.clear()
-
if oo_cfg.settings.get('variant', '') == '':
variant, version = get_variant_and_version()
oo_cfg.settings['variant'] = variant.name
oo_cfg.settings['variant_version'] = version.name
click.clear()
+ if not oo_cfg.hosts:
+ oo_cfg.hosts = collect_hosts(version=oo_cfg.settings['variant_version'])
+ click.clear()
+
return oo_cfg
def collect_new_nodes():
click.clear()
- click.echo('***New Node Configuration***')
+ click.echo('*** New Node Configuration ***')
message = """
Add new nodes here
"""
click.echo(message)
- return collect_hosts()
+ return collect_hosts(masters_set=True, print_summary=False)
def get_installed_hosts(hosts, callback_facts):
installed_hosts = []
@@ -334,7 +533,8 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force, verbose):
# This check has to happen before we start removing hosts later in this method
if not force:
if not unattended:
- click.echo('By default the installer only adds new nodes to an installed environment.')
+ click.echo('By default the installer only adds new nodes ' \
+ 'to an installed environment.')
response = click.prompt('Do you want to (1) only add additional nodes or ' \
'(2) reinstall the existing hosts ' \
'potentially erasing any custom changes?',
@@ -371,8 +571,8 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force, verbose):
else:
if unattended:
if not force:
- click.echo('Installed environment detected and no additional nodes specified: ' \
- 'aborting. If you want a fresh install, use ' \
+ click.echo('Installed environment detected and no additional ' \
+ 'nodes specified: aborting. If you want a fresh install, use ' \
'`atomic-openshift-installer install --force`')
sys.exit(1)
else:
@@ -386,8 +586,8 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force, verbose):
click.echo('Gathering information from hosts...')
callback_facts, error = openshift_ansible.default_facts(oo_cfg.hosts, verbose)
if error:
- click.echo("There was a problem fetching the required information. " \
- "See {} for details.".format(oo_cfg.settings['ansible_log_path']))
+ click.echo("There was a problem fetching the required information. See " \
+ "{} for details.".format(oo_cfg.settings['ansible_log_path']))
sys.exit(1)
else:
pass # proceeding as normal should do a clean install
@@ -428,10 +628,12 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force, verbose):
@click.option('-v', '--verbose',
is_flag=True, default=False)
#pylint: disable=too-many-arguments
+#pylint: disable=line-too-long
# Main CLI entrypoint, not much we can do about too many arguments.
def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_config, ansible_log_path, verbose):
"""
- atomic-openshift-installer makes the process for installing OSE or AEP easier by interactively gathering the data needed to run on each host.
+ atomic-openshift-installer makes the process for installing OSE or AEP
+ easier by interactively gathering the data needed to run on each host.
It can also be run in unattended mode if provided with a configuration file.
Further reading: https://docs.openshift.com/enterprise/latest/install_config/install/quick_install.html
@@ -443,7 +645,11 @@ def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_conf
ctx.obj['ansible_log_path'] = ansible_log_path
ctx.obj['verbose'] = verbose
- oo_cfg = OOConfig(ctx.obj['configuration'])
+ try:
+ oo_cfg = OOConfig(ctx.obj['configuration'])
+ except OOConfigInvalidHostError as e:
+ click.echo(e)
+ sys.exit(1)
# If no playbook dir on the CLI, check the config:
if not ansible_playbook_directory:
@@ -458,7 +664,8 @@ def cli(ctx, unattended, configuration, ansible_playbook_directory, ansible_conf
if ctx.obj['ansible_config']:
oo_cfg.settings['ansible_config'] = ctx.obj['ansible_config']
- elif os.path.exists(DEFAULT_ANSIBLE_CONFIG):
+ elif 'ansible_config' not in oo_cfg.settings and \
+ os.path.exists(DEFAULT_ANSIBLE_CONFIG):
# If we're installed by RPM this file should exist and we can use it as our default:
oo_cfg.settings['ansible_config'] = DEFAULT_ANSIBLE_CONFIG
@@ -475,7 +682,7 @@ def uninstall(ctx):
verbose = ctx.obj['verbose']
if len(oo_cfg.hosts) == 0:
- click.echo("No hosts defined in: %s" % oo_cfg['configuration'])
+ click.echo("No hosts defined in: %s" % oo_cfg.config_path)
sys.exit(1)
click.echo("OpenShift will be uninstalled from the following hosts:\n")
@@ -543,7 +750,10 @@ def install(ctx, force):
else:
oo_cfg = get_missing_info_from_user(oo_cfg)
+ check_hosts_config(oo_cfg, ctx.obj['unattended'])
+
click.echo('Gathering information from hosts...')
+ print_installation_summary(oo_cfg.hosts)
callback_facts, error = openshift_ansible.default_facts(oo_cfg.hosts,
verbose)
if error:
@@ -568,8 +778,8 @@ def install(ctx, force):
click.echo('Ready to run installation process.')
message = """
-If changes are needed to the values recorded by the installer please update {}.
-""".format(oo_cfg.config_path)
+If changes are needed please edit the config file above and re-run.
+"""
if not ctx.obj['unattended']:
confirm_continue(message)
diff --git a/utils/src/ooinstall/oo_config.py b/utils/src/ooinstall/oo_config.py
index 9c97e6e93..1be85bc1d 100644
--- a/utils/src/ooinstall/oo_config.py
+++ b/utils/src/ooinstall/oo_config.py
@@ -36,19 +36,24 @@ class Host(object):
self.public_ip = kwargs.get('public_ip', None)
self.public_hostname = kwargs.get('public_hostname', None)
self.connect_to = kwargs.get('connect_to', None)
+ self.preconfigured = kwargs.get('preconfigured', None)
# Should this host run as an OpenShift master:
self.master = kwargs.get('master', False)
# Should this host run as an OpenShift node:
self.node = kwargs.get('node', False)
+
+ # Should this host run as an HAProxy:
+ self.master_lb = kwargs.get('master_lb', False)
+
self.containerized = kwargs.get('containerized', False)
if self.connect_to is None:
- raise OOConfigInvalidHostError("You must specify either and 'ip' " \
- "or 'hostname' to connect to.")
+ raise OOConfigInvalidHostError("You must specify either an ip " \
+ "or hostname as 'connect_to'")
- if self.master is False and self.node is False:
+ if self.master is False and self.node is False and self.master_lb is False:
raise OOConfigInvalidHostError(
"You must specify each host as either a master or a node.")
@@ -62,12 +67,38 @@ class Host(object):
""" Used when exporting to yaml. """
d = {}
for prop in ['ip', 'hostname', 'public_ip', 'public_hostname',
- 'master', 'node', 'containerized', 'connect_to']:
+ 'master', 'node', 'master_lb', 'containerized', 'connect_to', 'preconfigured']:
# If the property is defined (not None or False), export it:
if getattr(self, prop):
d[prop] = getattr(self, prop)
return d
+ def is_etcd_member(self, all_hosts):
+ """ Will this host be a member of a standalone etcd cluster. """
+ if not self.master:
+ return False
+ masters = [host for host in all_hosts if host.master]
+ if len(masters) > 1:
+ return True
+ return False
+
+ def is_dedicated_node(self):
+ """ Will this host be a dedicated node. (not a master) """
+ return self.node and not self.master
+
+ def is_schedulable_node(self, all_hosts):
+ """ Will this host be a node marked as schedulable. """
+ if not self.node:
+ return False
+ if not self.master:
+ return True
+
+ masters = [host for host in all_hosts if host.master]
+ nodes = [host for host in all_hosts if host.node]
+ if len(masters) == len(nodes):
+ return True
+ return False
+
class OOConfig(object):
default_dir = os.path.normpath(
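The helpers added to Host above derive etcd membership and schedulability from the full host list rather than from per-host flags. A quick illustration of their behavior, assuming the ooinstall package from this patch is importable:

from ooinstall.oo_config import Host

# Three masters that double as nodes, plus one dedicated node:
hosts = [
    Host(connect_to='10.0.0.1', master=True, node=True),
    Host(connect_to='10.0.0.2', master=True, node=True),
    Host(connect_to='10.0.0.3', master=True, node=True),
    Host(connect_to='10.0.0.4', node=True),
]

for host in hosts:
    print host.connect_to, host.is_etcd_member(hosts), host.is_schedulable_node(hosts)

# The three masters become etcd members and stay unschedulable because a
# dedicated node exists; 10.0.0.4 is the only schedulable (dedicated) node.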
diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py
index fdd0c1168..17196a813 100644
--- a/utils/src/ooinstall/openshift_ansible.py
+++ b/utils/src/ooinstall/openshift_ansible.py
@@ -17,14 +17,17 @@ def set_config(cfg):
def generate_inventory(hosts):
global CFG
+ masters = [host for host in hosts if host.master]
+ nodes = [host for host in hosts if host.node]
+ proxy = determine_proxy_configuration(hosts)
+ multiple_masters = len(masters) > 1
base_inventory_path = CFG.settings['ansible_inventory_path']
base_inventory = open(base_inventory_path, 'w')
- base_inventory.write('\n[OSEv3:children]\nmasters\nnodes\n')
- base_inventory.write('\n[OSEv3:vars]\n')
- base_inventory.write('ansible_ssh_user={}\n'.format(CFG.settings['ansible_ssh_user']))
- if CFG.settings['ansible_ssh_user'] != 'root':
- base_inventory.write('ansible_become=true\n')
+
+ write_inventory_children(base_inventory, multiple_masters, proxy)
+
+ write_inventory_vars(base_inventory, multiple_masters, proxy)
# Find the correct deployment type for ansible:
ver = find_variant(CFG.settings['variant'],
@@ -45,24 +48,66 @@ def generate_inventory(hosts):
"'enabled': 1, 'gpgcheck': 0}}]\n".format(os.environ['OO_INSTALL_PUDDLE_REPO']))
base_inventory.write('\n[masters]\n')
- masters = (host for host in hosts if host.master)
for master in masters:
write_host(master, base_inventory)
+
+ if len(masters) > 1:
+ base_inventory.write('\n[etcd]\n')
+ for master in masters:
+ write_host(master, base_inventory)
+
base_inventory.write('\n[nodes]\n')
- nodes = (host for host in hosts if host.node)
+
for node in nodes:
- # TODO: Until the Master can run the SDN itself we have to configure the Masters
- # as Nodes too.
- scheduleable = True
- # If there's only one Node and it's also a Master we want it to be scheduleable:
- if node in masters and len(masters) != 1:
- scheduleable = False
- write_host(node, base_inventory, scheduleable)
+ # Let the fact defaults decide if we're not a master:
+ schedulable = None
+
+ # If the node is also a master, we must explicitly set schedulability:
+ if node.master:
+ schedulable = node.is_schedulable_node(hosts)
+ write_host(node, base_inventory, schedulable)
+
+ if not getattr(proxy, 'preconfigured', True):
+ base_inventory.write('\n[lb]\n')
+ write_host(proxy, base_inventory)
+
base_inventory.close()
return base_inventory_path
+def determine_proxy_configuration(hosts):
+ proxy = next((host for host in hosts if host.master_lb), None)
+ if proxy:
+ if proxy.hostname is None:
+ proxy.hostname = proxy.connect_to
+ proxy.public_hostname = proxy.connect_to
+ return proxy
+
+ return None
-def write_host(host, inventory, scheduleable=True):
+def write_inventory_children(base_inventory, multiple_masters, proxy):
+ global CFG
+
+ base_inventory.write('\n[OSEv3:children]\n')
+ base_inventory.write('masters\n')
+ base_inventory.write('nodes\n')
+ if multiple_masters:
+ base_inventory.write('etcd\n')
+ if not getattr(proxy, 'preconfigured', True):
+ base_inventory.write('lb\n')
+
+def write_inventory_vars(base_inventory, multiple_masters, proxy):
+ global CFG
+ base_inventory.write('\n[OSEv3:vars]\n')
+ base_inventory.write('ansible_ssh_user={}\n'.format(CFG.settings['ansible_ssh_user']))
+ if CFG.settings['ansible_ssh_user'] != 'root':
+ base_inventory.write('ansible_become=true\n')
+ if multiple_masters and proxy is not None:
+ base_inventory.write('openshift_master_cluster_method=native\n')
+ base_inventory.write("openshift_master_cluster_hostname={}\n".format(proxy.hostname))
+ base_inventory.write("openshift_master_cluster_public_hostname={}\n".format(proxy.public_hostname))
+
+
+def write_host(host, inventory, schedulable=None):
global CFG
facts = ''
@@ -76,13 +121,21 @@ def write_host(host, inventory, scheduleable=True):
facts += ' openshift_public_hostname={}'.format(host.public_hostname)
# TODO: For now write_host handles both masters and nodes.
# Technically only nodes will ever need this.
- if not scheduleable:
- facts += ' openshift_scheduleable=False'
+
+ # Distinguish between three states: no schedulability specified (use default),
+ # explicitly set to True, or explicitly set to False:
+ if schedulable is None:
+ pass
+ elif schedulable:
+ facts += ' openshift_schedulable=True'
+ elif not schedulable:
+ facts += ' openshift_schedulable=False'
+
installer_host = socket.gethostname()
if installer_host in [host.connect_to, host.hostname, host.public_hostname]:
facts += ' ansible_connection=local'
if os.geteuid() != 0:
- no_pwd_sudo = subprocess.call(['sudo', '-n', 'echo openshift'])
+ no_pwd_sudo = subprocess.call(['sudo', '-n', 'echo', 'openshift'])
if no_pwd_sudo == 1:
print 'The atomic-openshift-installer requires sudo access without a password.'
sys.exit(1)
@@ -118,6 +171,7 @@ def default_facts(hosts, verbose=False):
facts_env = os.environ.copy()
facts_env["OO_INSTALL_CALLBACK_FACTS_YAML"] = CFG.settings['ansible_callback_facts_yaml']
facts_env["ANSIBLE_CALLBACK_PLUGINS"] = CFG.settings['ansible_plugins_directory']
+ facts_env["OPENSHIFT_MASTER_CLUSTER_METHOD"] = 'native'
if 'ansible_log_path' in CFG.settings:
facts_env["ANSIBLE_LOG_PATH"] = CFG.settings['ansible_log_path']
if 'ansible_config' in CFG.settings:
@@ -130,10 +184,10 @@ def run_main_playbook(hosts, hosts_to_run_on, verbose=False):
inventory_file = generate_inventory(hosts_to_run_on)
if len(hosts_to_run_on) != len(hosts):
main_playbook_path = os.path.join(CFG.ansible_playbook_directory,
- 'playbooks/common/openshift-cluster/scaleup.yml')
+ 'playbooks/byo/openshift-cluster/scaleup.yml')
else:
main_playbook_path = os.path.join(CFG.ansible_playbook_directory,
- 'playbooks/byo/config.yml')
+ 'playbooks/byo/openshift-cluster/config.yml')
facts_env = os.environ.copy()
if 'ansible_log_path' in CFG.settings:
facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path']
@@ -176,4 +230,3 @@ def run_upgrade_playbook(verbose=False):
if 'ansible_config' in CFG.settings:
facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config']
return run_ansible(playbook, inventory_file, facts_env, verbose)
-
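write_host now distinguishes three schedulability states instead of defaulting every node to schedulable. A minimal sketch of just that fact-string logic (the helper name is illustrative):

def schedulable_fact(schedulable):
    # None means: say nothing and let the openshift_facts defaults decide.
    if schedulable is None:
        return ''
    # Otherwise emit an explicit True/False fact for the inventory line.
    return ' openshift_schedulable=%s' % ('True' if schedulable else 'False')

print repr(schedulable_fact(None))   # ''
print repr(schedulable_fact(True))   # ' openshift_schedulable=True'
print repr(schedulable_fact(False))  # ' openshift_schedulable=False'

In the generated inventory this yields node lines such as "10.0.0.1 openshift_schedulable=False" for a master that should not run pods.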
diff --git a/utils/src/ooinstall/variants.py b/utils/src/ooinstall/variants.py
index 3bb61dddb..571025543 100644
--- a/utils/src/ooinstall/variants.py
+++ b/utils/src/ooinstall/variants.py
@@ -30,14 +30,14 @@ class Variant(object):
self.versions = versions
def latest_version(self):
- return self.versions[-1]
+ return self.versions[0]
# WARNING: Keep the versions ordered, most recent first:
OSE = Variant('openshift-enterprise', 'OpenShift Enterprise',
[
- Version('3.0', 'enterprise'),
- Version('3.1', 'openshift-enterprise')
+ Version('3.1', 'openshift-enterprise'),
+ Version('3.0', 'enterprise')
]
)
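Since the version list is now kept most-recent-first, latest_version simply returns the head of the list. A stand-alone sketch of the same convention (the Version/Variant stand-ins here are simplified, not the real classes):

class Version(object):
    def __init__(self, name, ansible_key):
        self.name = name
        self.ansible_key = ansible_key

class Variant(object):
    def __init__(self, name, description, versions):
        self.name = name
        self.description = description
        # Keep the versions ordered, most recent first:
        self.versions = versions

    def latest_version(self):
        return self.versions[0]

ose = Variant('openshift-enterprise', 'OpenShift Enterprise',
              [Version('3.1', 'openshift-enterprise'),
               Version('3.0', 'enterprise')])
print ose.latest_version().name  # 3.1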
diff --git a/utils/test/cli_installer_tests.py b/utils/test/cli_installer_tests.py
index fc16d9ceb..d028bf472 100644
--- a/utils/test/cli_installer_tests.py
+++ b/utils/test/cli_installer_tests.py
@@ -5,12 +5,10 @@
import copy
import os
import ConfigParser
-import yaml
import ooinstall.cli_installer as cli
-from click.testing import CliRunner
-from test.oo_config_tests import OOInstallFixture
+from test.fixture import OOCliFixture, SAMPLE_CONFIG, build_input, read_yaml
from mock import patch
@@ -41,8 +39,67 @@ MOCK_FACTS = {
},
}
-# Substitute in a product name before use:
-SAMPLE_CONFIG = """
+MOCK_FACTS_QUICKHA = {
+ '10.0.0.1': {
+ 'common': {
+ 'ip': '10.0.0.1',
+ 'public_ip': '10.0.0.1',
+ 'hostname': 'master-private.example.com',
+ 'public_hostname': 'master.example.com'
+ }
+ },
+ '10.0.0.2': {
+ 'common': {
+ 'ip': '10.0.0.2',
+ 'public_ip': '10.0.0.2',
+ 'hostname': 'node1-private.example.com',
+ 'public_hostname': 'node1.example.com'
+ }
+ },
+ '10.0.0.3': {
+ 'common': {
+ 'ip': '10.0.0.3',
+ 'public_ip': '10.0.0.3',
+ 'hostname': 'node2-private.example.com',
+ 'public_hostname': 'node2.example.com'
+ }
+ },
+ '10.0.0.4': {
+ 'common': {
+ 'ip': '10.0.0.4',
+ 'public_ip': '10.0.0.4',
+ 'hostname': 'proxy-private.example.com',
+ 'public_hostname': 'proxy.example.com'
+ }
+ },
+}
+
+# Missing connect_to on some hosts:
+BAD_CONFIG = """
+variant: %s
+ansible_ssh_user: root
+hosts:
+ - connect_to: 10.0.0.1
+ ip: 10.0.0.1
+ hostname: master-private.example.com
+ public_ip: 24.222.0.1
+ public_hostname: master.example.com
+ master: true
+ node: true
+ - ip: 10.0.0.2
+ hostname: node1-private.example.com
+ public_ip: 24.222.0.2
+ public_hostname: node1.example.com
+ node: true
+ - connect_to: 10.0.0.3
+ ip: 10.0.0.3
+ hostname: node2-private.example.com
+ public_ip: 24.222.0.3
+ public_hostname: node2.example.com
+ node: true
+"""
+
+QUICKHA_CONFIG = """
variant: %s
ansible_ssh_user: root
hosts:
@@ -58,6 +115,7 @@ hosts:
hostname: node1-private.example.com
public_ip: 24.222.0.2
public_hostname: node1.example.com
+ master: true
node: true
- connect_to: 10.0.0.3
ip: 10.0.0.3
@@ -65,109 +123,107 @@ hosts:
public_ip: 24.222.0.3
public_hostname: node2.example.com
node: true
+ master: true
+ - connect_to: 10.0.0.4
+ ip: 10.0.0.4
+ hostname: node3-private.example.com
+ public_ip: 24.222.0.4
+ public_hostname: node3.example.com
+ node: true
+ - connect_to: 10.0.0.5
+ ip: 10.0.0.5
+ hostname: proxy-private.example.com
+ public_ip: 24.222.0.5
+ public_hostname: proxy.example.com
+ master_lb: true
"""
+QUICKHA_2_MASTER_CONFIG = """
+variant: %s
+ansible_ssh_user: root
+hosts:
+ - connect_to: 10.0.0.1
+ ip: 10.0.0.1
+ hostname: master-private.example.com
+ public_ip: 24.222.0.1
+ public_hostname: master.example.com
+ master: true
+ node: true
+ - connect_to: 10.0.0.2
+ ip: 10.0.0.2
+ hostname: node1-private.example.com
+ public_ip: 24.222.0.2
+ public_hostname: node1.example.com
+ master: true
+ node: true
+ - connect_to: 10.0.0.4
+ ip: 10.0.0.4
+ hostname: node3-private.example.com
+ public_ip: 24.222.0.4
+ public_hostname: node3.example.com
+ node: true
+ - connect_to: 10.0.0.5
+ ip: 10.0.0.5
+ hostname: proxy-private.example.com
+ public_ip: 24.222.0.5
+ public_hostname: proxy.example.com
+ master_lb: true
+"""
-class OOCliFixture(OOInstallFixture):
-
- def setUp(self):
- OOInstallFixture.setUp(self)
- self.runner = CliRunner()
-
- # Add any arguments you would like to test here, the defaults ensure
- # we only do unattended invocations here, and using temporary files/dirs.
- self.cli_args = ["-a", self.work_dir]
-
- def run_cli(self):
- return self.runner.invoke(cli.cli, self.cli_args)
-
- def assert_result(self, result, exit_code):
- if result.exception is not None or result.exit_code != exit_code:
- print "Unexpected result from CLI execution"
- print "Exit code: %s" % result.exit_code
- print "Exception: %s" % result.exception
- print result.exc_info
- import traceback
- traceback.print_exception(*result.exc_info)
- print "Output:\n%s" % result.output
- self.fail("Exception during CLI execution")
-
- def _read_yaml(self, config_file_path):
- f = open(config_file_path, 'r')
- config = yaml.safe_load(f.read())
- f.close()
- return config
-
- def _verify_load_facts(self, load_facts_mock):
- """ Check that we ran load facts with expected inputs. """
- load_facts_args = load_facts_mock.call_args[0]
- self.assertEquals(os.path.join(self.work_dir, ".ansible/hosts"),
- load_facts_args[0])
- self.assertEquals(os.path.join(self.work_dir,
- "playbooks/byo/openshift_facts.yml"), load_facts_args[1])
- env_vars = load_facts_args[2]
- self.assertEquals(os.path.join(self.work_dir,
- '.ansible/callback_facts.yaml'),
- env_vars['OO_INSTALL_CALLBACK_FACTS_YAML'])
- self.assertEqual('/tmp/ansible.log', env_vars['ANSIBLE_LOG_PATH'])
-
- def _verify_run_playbook(self, run_playbook_mock, exp_hosts_len, exp_hosts_to_run_on_len):
- """ Check that we ran playbook with expected inputs. """
- hosts = run_playbook_mock.call_args[0][0]
- hosts_to_run_on = run_playbook_mock.call_args[0][1]
- self.assertEquals(exp_hosts_len, len(hosts))
- self.assertEquals(exp_hosts_to_run_on_len, len(hosts_to_run_on))
-
- def _verify_config_hosts(self, written_config, host_count):
- print written_config['hosts']
- self.assertEquals(host_count, len(written_config['hosts']))
- for h in written_config['hosts']:
- self.assertTrue(h['node'])
- self.assertTrue('ip' in h)
- self.assertTrue('hostname' in h)
- self.assertTrue('public_ip' in h)
- self.assertTrue('public_hostname' in h)
-
- #pylint: disable=too-many-arguments
- def _verify_get_hosts_to_run_on(self, mock_facts, load_facts_mock,
- run_playbook_mock, cli_input,
- exp_hosts_len=None, exp_hosts_to_run_on_len=None,
- force=None):
- """
- Tests cli_installer.py:get_hosts_to_run_on. That method has quite a
- few subtle branches in the logic. The goal with this method is simply
- to handle all the messy stuff here and allow the main test cases to be
- easily read. The basic idea is to modify mock_facts to return a
- version indicating OpenShift is already installed on particular hosts.
- """
- load_facts_mock.return_value = (mock_facts, 0)
- run_playbook_mock.return_value = 0
-
- if cli_input:
- self.cli_args.append("install")
- result = self.runner.invoke(cli.cli,
- self.cli_args,
- input=cli_input)
- else:
- config_file = self.write_config(os.path.join(self.work_dir,
- 'ooinstall.conf'), SAMPLE_CONFIG % 'openshift-enterprise')
-
- self.cli_args.extend(["-c", config_file, "install"])
- if force:
- self.cli_args.append("--force")
- result = self.runner.invoke(cli.cli, self.cli_args)
- written_config = self._read_yaml(config_file)
- self._verify_config_hosts(written_config, exp_hosts_len)
-
- self.assert_result(result, 0)
- self._verify_load_facts(load_facts_mock)
- self._verify_run_playbook(run_playbook_mock, exp_hosts_len, exp_hosts_to_run_on_len)
+QUICKHA_CONFIG_REUSED_LB = """
+variant: %s
+ansible_ssh_user: root
+hosts:
+ - connect_to: 10.0.0.1
+ ip: 10.0.0.1
+ hostname: master-private.example.com
+ public_ip: 24.222.0.1
+ public_hostname: master.example.com
+ master: true
+ node: true
+ - connect_to: 10.0.0.2
+ ip: 10.0.0.2
+ hostname: node1-private.example.com
+ public_ip: 24.222.0.2
+ public_hostname: node1.example.com
+ master: true
+ node: true
+ master_lb: true
+ - connect_to: 10.0.0.3
+ ip: 10.0.0.3
+ hostname: node2-private.example.com
+ public_ip: 24.222.0.3
+ public_hostname: node2.example.com
+ node: true
+ master: true
+"""
- # Make sure we ran on the expected masters and nodes:
- hosts = run_playbook_mock.call_args[0][0]
- hosts_to_run_on = run_playbook_mock.call_args[0][1]
- self.assertEquals(exp_hosts_len, len(hosts))
- self.assertEquals(exp_hosts_to_run_on_len, len(hosts_to_run_on))
+QUICKHA_CONFIG_NO_LB = """
+variant: %s
+ansible_ssh_user: root
+hosts:
+ - connect_to: 10.0.0.1
+ ip: 10.0.0.1
+ hostname: master-private.example.com
+ public_ip: 24.222.0.1
+ public_hostname: master.example.com
+ master: true
+ node: true
+ - connect_to: 10.0.0.2
+ ip: 10.0.0.2
+ hostname: node1-private.example.com
+ public_ip: 24.222.0.2
+ public_hostname: node1.example.com
+ master: true
+ node: true
+ - connect_to: 10.0.0.3
+ ip: 10.0.0.3
+ hostname: node2-private.example.com
+ public_ip: 24.222.0.3
+ public_hostname: node2.example.com
+ node: true
+ master: true
+"""
class UnattendedCliTests(OOCliFixture):
@@ -284,7 +340,9 @@ class UnattendedCliTests(OOCliFixture):
'.ansible/callback_facts.yaml'),
env_vars['OO_INSTALL_CALLBACK_FACTS_YAML'])
self.assertEqual('/tmp/ansible.log', env_vars['ANSIBLE_LOG_PATH'])
- self.assertTrue('ANSIBLE_CONFIG' not in env_vars)
+ # If the user running the test has the rpm installed, this might be set to the default:
+ self.assertTrue('ANSIBLE_CONFIG' not in env_vars or
+ env_vars['ANSIBLE_CONFIG'] == cli.DEFAULT_ANSIBLE_CONFIG)
# Make sure we ran on the expected masters and nodes:
hosts = run_playbook_mock.call_args[0][0]
@@ -345,7 +403,7 @@ class UnattendedCliTests(OOCliFixture):
result = self.runner.invoke(cli.cli, self.cli_args)
self.assert_result(result, 0)
- written_config = self._read_yaml(config_file)
+ written_config = read_yaml(config_file)
self.assertEquals('openshift-enterprise', written_config['variant'])
# We didn't specify a version so the latest should have been assumed,
@@ -374,7 +432,7 @@ class UnattendedCliTests(OOCliFixture):
result = self.runner.invoke(cli.cli, self.cli_args)
self.assert_result(result, 0)
- written_config = self._read_yaml(config_file)
+ written_config = read_yaml(config_file)
self.assertEquals('openshift-enterprise', written_config['variant'])
# Make sure our older version was preserved:
@@ -450,14 +508,105 @@ class UnattendedCliTests(OOCliFixture):
if expected_result:
self.assertEquals(expected_result, facts_env_vars['ANSIBLE_CONFIG'])
else:
- self.assertFalse('ANSIBLE_CONFIG' in facts_env_vars)
+ # If the user running the test has the rpm installed, this might be set to the default:
+ self.assertTrue('ANSIBLE_CONFIG' not in facts_env_vars or
+ facts_env_vars['ANSIBLE_CONFIG'] == cli.DEFAULT_ANSIBLE_CONFIG)
# Test the env vars for main playbook:
env_vars = run_ansible_mock.call_args[0][2]
if expected_result:
self.assertEquals(expected_result, env_vars['ANSIBLE_CONFIG'])
else:
- self.assertFalse('ANSIBLE_CONFIG' in env_vars)
+ # If the user running the test has the rpm installed, this might be set to the default:
+ self.assertTrue('ANSIBLE_CONFIG' not in env_vars or
+ env_vars['ANSIBLE_CONFIG'] == cli.DEFAULT_ANSIBLE_CONFIG)
+
+ # unattended with bad config file and no installed hosts (without --force)
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_bad_config(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS, 0)
+ run_playbook_mock.return_value = 0
+
+ config_file = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), BAD_CONFIG % 'openshift-enterprise')
+
+ self.cli_args.extend(["-c", config_file, "install"])
+ result = self.runner.invoke(cli.cli, self.cli_args)
+
+ self.assertEquals(1, result.exit_code)
+ self.assertTrue("You must specify either an ip or hostname"
+ in result.output)
+
+ #unattended with three masters, one node, and haproxy
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_quick_ha_full_run(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
+ run_playbook_mock.return_value = 0
+
+ config_file = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), QUICKHA_CONFIG % 'openshift-enterprise')
+
+ self.cli_args.extend(["-c", config_file, "install"])
+ result = self.runner.invoke(cli.cli, self.cli_args)
+ self.assert_result(result, 0)
+
+ # Make sure we ran on the expected masters and nodes:
+ hosts = run_playbook_mock.call_args[0][0]
+ hosts_to_run_on = run_playbook_mock.call_args[0][1]
+ self.assertEquals(5, len(hosts))
+ self.assertEquals(5, len(hosts_to_run_on))
+
+ #unattended with two masters, one node, and haproxy
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_quick_ha_only_2_masters(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
+ run_playbook_mock.return_value = 0
+
+ config_file = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), QUICKHA_2_MASTER_CONFIG % 'openshift-enterprise')
+
+ self.cli_args.extend(["-c", config_file, "install"])
+ result = self.runner.invoke(cli.cli, self.cli_args)
+
+ # This is an invalid config:
+ self.assert_result(result, 1)
+ self.assertTrue("A minimum of 3 Masters are required" in result.output)
+
+ #unattended with three masters, one node, but no load balancer specified:
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_quick_ha_no_lb(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
+ run_playbook_mock.return_value = 0
+
+ config_file = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), QUICKHA_CONFIG_NO_LB % 'openshift-enterprise')
+
+ self.cli_args.extend(["-c", config_file, "install"])
+ result = self.runner.invoke(cli.cli, self.cli_args)
+
+ # This is not a valid input:
+ self.assert_result(result, 1)
+ self.assertTrue('No master load balancer specified in config' in result.output)
+
+ #unattended with three masters, one node, and one of the masters reused as load balancer:
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_quick_ha_reused_lb(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
+ run_playbook_mock.return_value = 0
+
+ config_file = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), QUICKHA_CONFIG_REUSED_LB % 'openshift-enterprise')
+
+ self.cli_args.extend(["-c", config_file, "install"])
+ result = self.runner.invoke(cli.cli, self.cli_args)
+
+ # This is not a valid configuration:
+ self.assert_result(result, 1)
class AttendedCliTests(OOCliFixture):
@@ -468,64 +617,13 @@ class AttendedCliTests(OOCliFixture):
self.config_file = os.path.join(self.work_dir, 'config.yml')
self.cli_args.extend(["-c", self.config_file])
- #pylint: disable=too-many-arguments
- def _build_input(self, ssh_user=None, hosts=None, variant_num=None,
- add_nodes=None, confirm_facts=None):
- """
- Builds a CLI input string with newline characters to simulate
- the full run.
- This gives us only one place to update when the input prompts change.
- """
-
- inputs = [
- 'y', # let's proceed
- ]
- if ssh_user:
- inputs.append(ssh_user)
-
- if hosts:
- i = 0
- for (host, is_master) in hosts:
- inputs.append(host)
- inputs.append('y' if is_master else 'n')
- #inputs.append('rpm')
- if i < len(hosts) - 1:
- inputs.append('y') # Add more hosts
- else:
- inputs.append('n') # Done adding hosts
- i += 1
-
- if variant_num:
- inputs.append(str(variant_num)) # Choose variant + version
-
- # TODO: support option 2, fresh install
- if add_nodes:
- inputs.append('1') # Add more nodes
- i = 0
- for (host, is_master) in add_nodes:
- inputs.append(host)
- inputs.append('y' if is_master else 'n')
- #inputs.append('rpm')
- if i < len(add_nodes) - 1:
- inputs.append('y') # Add more hosts
- else:
- inputs.append('n') # Done adding hosts
- i += 1
-
- inputs.extend([
- confirm_facts,
- 'y', # lets do this
- ])
-
- return '\n'.join(inputs)
-
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_full_run(self, load_facts_mock, run_playbook_mock):
load_facts_mock.return_value = (MOCK_FACTS, 0)
run_playbook_mock.return_value = 0
- cli_input = self._build_input(hosts=[
+ cli_input = build_input(hosts=[
('10.0.0.1', True),
('10.0.0.2', False),
('10.0.0.3', False)],
@@ -540,9 +638,18 @@ class AttendedCliTests(OOCliFixture):
self._verify_load_facts(load_facts_mock)
self._verify_run_playbook(run_playbook_mock, 3, 3)
- written_config = self._read_yaml(self.config_file)
+ written_config = read_yaml(self.config_file)
self._verify_config_hosts(written_config, 3)
+ inventory = ConfigParser.ConfigParser(allow_no_value=True)
+ inventory.read(os.path.join(self.work_dir, '.ansible/hosts'))
+ self.assertEquals('False',
+ inventory.get('nodes', '10.0.0.1 openshift_schedulable'))
+ self.assertEquals(None,
+ inventory.get('nodes', '10.0.0.2'))
+ self.assertEquals(None,
+ inventory.get('nodes', '10.0.0.3'))
+
# interactive with config file and some installed some uninstalled hosts
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
@@ -557,7 +664,7 @@ class AttendedCliTests(OOCliFixture):
load_facts_mock.return_value = (mock_facts, 0)
run_playbook_mock.return_value = 0
- cli_input = self._build_input(hosts=[
+ cli_input = build_input(hosts=[
('10.0.0.1', True),
('10.0.0.2', False),
],
@@ -574,7 +681,7 @@ class AttendedCliTests(OOCliFixture):
self._verify_load_facts(load_facts_mock)
self._verify_run_playbook(run_playbook_mock, 3, 2)
- written_config = self._read_yaml(self.config_file)
+ written_config = read_yaml(self.config_file)
self._verify_config_hosts(written_config, 3)
@patch('ooinstall.openshift_ansible.run_main_playbook')
@@ -586,7 +693,7 @@ class AttendedCliTests(OOCliFixture):
config_file = self.write_config(os.path.join(self.work_dir,
'ooinstall.conf'),
SAMPLE_CONFIG % 'openshift-enterprise')
- cli_input = self._build_input(confirm_facts='y')
+ cli_input = build_input(confirm_facts='y')
self.cli_args.extend(["-c", config_file])
self.cli_args.append("install")
result = self.runner.invoke(cli.cli,
@@ -597,7 +704,7 @@ class AttendedCliTests(OOCliFixture):
self._verify_load_facts(load_facts_mock)
self._verify_run_playbook(run_playbook_mock, 3, 3)
- written_config = self._read_yaml(config_file)
+ written_config = read_yaml(config_file)
self._verify_config_hosts(written_config, 3)
#interactive with config file and all installed hosts
@@ -608,12 +715,13 @@ class AttendedCliTests(OOCliFixture):
mock_facts['10.0.0.1']['common']['version'] = "3.0.0"
mock_facts['10.0.0.2']['common']['version'] = "3.0.0"
- cli_input = self._build_input(hosts=[
+ cli_input = build_input(hosts=[
('10.0.0.1', True),
],
add_nodes=[('10.0.0.2', False)],
ssh_user='root',
variant_num=1,
+ schedulable_masters_ok=True,
confirm_facts='y')
self._verify_get_hosts_to_run_on(mock_facts, load_facts_mock,
@@ -623,6 +731,131 @@ class AttendedCliTests(OOCliFixture):
exp_hosts_to_run_on_len=2,
force=False)
+ #interactive multimaster: one more node than master
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_ha_dedicated_node(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
+ run_playbook_mock.return_value = 0
+
+ cli_input = build_input(hosts=[
+ ('10.0.0.1', True),
+ ('10.0.0.2', True),
+ ('10.0.0.3', True),
+ ('10.0.0.4', False)],
+ ssh_user='root',
+ variant_num=1,
+ confirm_facts='y',
+ master_lb=('10.0.0.5', False))
+ self.cli_args.append("install")
+ result = self.runner.invoke(cli.cli, self.cli_args,
+ input=cli_input)
+ self.assert_result(result, 0)
+
+ self._verify_load_facts(load_facts_mock)
+ self._verify_run_playbook(run_playbook_mock, 5, 5)
+
+ written_config = read_yaml(self.config_file)
+ self._verify_config_hosts(written_config, 5)
+
+ inventory = ConfigParser.ConfigParser(allow_no_value=True)
+ inventory.read(os.path.join(self.work_dir, '.ansible/hosts'))
+ self.assertEquals('False',
+ inventory.get('nodes', '10.0.0.1 openshift_schedulable'))
+ self.assertEquals('False',
+ inventory.get('nodes', '10.0.0.2 openshift_schedulable'))
+ self.assertEquals('False',
+ inventory.get('nodes', '10.0.0.3 openshift_schedulable'))
+ self.assertEquals(None,
+ inventory.get('nodes', '10.0.0.4'))
+
+ self.assertTrue(inventory.has_section('etcd'))
+ self.assertEquals(3, len(inventory.items('etcd')))
+
+ #interactive multimaster: identical masters and nodes
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_ha_no_dedicated_nodes(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
+ run_playbook_mock.return_value = 0
+
+ cli_input = build_input(hosts=[
+ ('10.0.0.1', True),
+ ('10.0.0.2', True),
+ ('10.0.0.3', True)],
+ ssh_user='root',
+ variant_num=1,
+ confirm_facts='y',
+ master_lb=('10.0.0.5', False))
+ self.cli_args.append("install")
+ result = self.runner.invoke(cli.cli, self.cli_args,
+ input=cli_input)
+ self.assert_result(result, 0)
+
+ self._verify_load_facts(load_facts_mock)
+ self._verify_run_playbook(run_playbook_mock, 4, 4)
+
+ written_config = read_yaml(self.config_file)
+ self._verify_config_hosts(written_config, 4)
+
+ inventory = ConfigParser.ConfigParser(allow_no_value=True)
+ inventory.read(os.path.join(self.work_dir, '.ansible/hosts'))
+ self.assertEquals('True',
+ inventory.get('nodes', '10.0.0.1 openshift_schedulable'))
+ self.assertEquals('True',
+ inventory.get('nodes', '10.0.0.2 openshift_schedulable'))
+ self.assertEquals('True',
+ inventory.get('nodes', '10.0.0.3 openshift_schedulable'))
+
+ #interactive multimaster: attempting to use a master as the load balancer should fail:
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_ha_reuse_master_as_lb(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0)
+ run_playbook_mock.return_value = 0
+
+ cli_input = build_input(hosts=[
+ ('10.0.0.1', True),
+ ('10.0.0.2', True),
+ ('10.0.0.3', False),
+ ('10.0.0.4', True)],
+ ssh_user='root',
+ variant_num=1,
+ confirm_facts='y',
+ master_lb=(['10.0.0.2', '10.0.0.5'], False))
+ self.cli_args.append("install")
+ result = self.runner.invoke(cli.cli, self.cli_args,
+ input=cli_input)
+ self.assert_result(result, 0)
+
+ #interactive all-in-one
+ @patch('ooinstall.openshift_ansible.run_main_playbook')
+ @patch('ooinstall.openshift_ansible.load_system_facts')
+ def test_all_in_one(self, load_facts_mock, run_playbook_mock):
+ load_facts_mock.return_value = (MOCK_FACTS, 0)
+ run_playbook_mock.return_value = 0
+
+ cli_input = build_input(hosts=[
+ ('10.0.0.1', True)],
+ ssh_user='root',
+ variant_num=1,
+ confirm_facts='y')
+ self.cli_args.append("install")
+ result = self.runner.invoke(cli.cli, self.cli_args,
+ input=cli_input)
+ self.assert_result(result, 0)
+
+ self._verify_load_facts(load_facts_mock)
+ self._verify_run_playbook(run_playbook_mock, 1, 1)
+
+ written_config = read_yaml(self.config_file)
+ self._verify_config_hosts(written_config, 1)
+
+ inventory = ConfigParser.ConfigParser(allow_no_value=True)
+ inventory.read(os.path.join(self.work_dir, '.ansible/hosts'))
+ self.assertEquals('True',
+ inventory.get('nodes', '10.0.0.1 openshift_schedulable'))
+
# TODO: test with config file, attended add node
# TODO: test with config file, attended new node already in config file
# TODO: test with config file, attended new node already in config file, plus manually added nodes
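The inventory assertions in these tests work because ConfigParser with allow_no_value=True reads an Ansible host line such as "10.0.0.1 openshift_schedulable=False" as option "10.0.0.1 openshift_schedulable" with value 'False', while a bare "10.0.0.2" becomes an option whose value is None. A self-contained illustration (the inventory text is made up for the example):

import ConfigParser
import StringIO

INVENTORY = """
[nodes]
10.0.0.1 openshift_schedulable=False
10.0.0.2
"""

parser = ConfigParser.ConfigParser(allow_no_value=True)
parser.readfp(StringIO.StringIO(INVENTORY))
print parser.get('nodes', '10.0.0.1 openshift_schedulable')  # False
print parser.get('nodes', '10.0.0.2')                        # None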
diff --git a/utils/test/fixture.py b/utils/test/fixture.py
new file mode 100644
index 000000000..90bd9e1ef
--- /dev/null
+++ b/utils/test/fixture.py
@@ -0,0 +1,221 @@
+# pylint: disable=missing-docstring
+import os
+import yaml
+
+import ooinstall.cli_installer as cli
+
+from test.oo_config_tests import OOInstallFixture
+from click.testing import CliRunner
+
+# Substitute in a product name before use:
+SAMPLE_CONFIG = """
+variant: %s
+ansible_ssh_user: root
+hosts:
+ - connect_to: 10.0.0.1
+ ip: 10.0.0.1
+ hostname: master-private.example.com
+ public_ip: 24.222.0.1
+ public_hostname: master.example.com
+ master: true
+ node: true
+ - connect_to: 10.0.0.2
+ ip: 10.0.0.2
+ hostname: node1-private.example.com
+ public_ip: 24.222.0.2
+ public_hostname: node1.example.com
+ node: true
+ - connect_to: 10.0.0.3
+ ip: 10.0.0.3
+ hostname: node2-private.example.com
+ public_ip: 24.222.0.3
+ public_hostname: node2.example.com
+ node: true
+"""
+
+def read_yaml(config_file_path):
+ cfg_f = open(config_file_path, 'r')
+ config = yaml.safe_load(cfg_f.read())
+ cfg_f.close()
+ return config
+
+
+class OOCliFixture(OOInstallFixture):
+
+ def setUp(self):
+ OOInstallFixture.setUp(self)
+ self.runner = CliRunner()
+
+ # Add any arguments you would like to test here; the defaults ensure
+ # we only do unattended invocations using temporary files/dirs.
+ self.cli_args = ["-a", self.work_dir]
+
+ def run_cli(self):
+ return self.runner.invoke(cli.cli, self.cli_args)
+
+ def assert_result(self, result, exit_code):
+ if result.exit_code != exit_code:
+ print "Unexpected result from CLI execution"
+ print "Exit code: %s" % result.exit_code
+ print "Exception: %s" % result.exception
+ print result.exc_info
+ import traceback
+ traceback.print_exception(*result.exc_info)
+ print "Output:\n%s" % result.output
+ self.fail("Exception during CLI execution")
+
+ def _verify_load_facts(self, load_facts_mock):
+ """ Check that we ran load facts with expected inputs. """
+ load_facts_args = load_facts_mock.call_args[0]
+ self.assertEquals(os.path.join(self.work_dir, ".ansible/hosts"),
+ load_facts_args[0])
+ self.assertEquals(os.path.join(self.work_dir,
+ "playbooks/byo/openshift_facts.yml"),
+ load_facts_args[1])
+ env_vars = load_facts_args[2]
+ self.assertEquals(os.path.join(self.work_dir,
+ '.ansible/callback_facts.yaml'),
+ env_vars['OO_INSTALL_CALLBACK_FACTS_YAML'])
+ self.assertEqual('/tmp/ansible.log', env_vars['ANSIBLE_LOG_PATH'])
+
+ def _verify_run_playbook(self, run_playbook_mock, exp_hosts_len, exp_hosts_to_run_on_len):
+ """ Check that we ran playbook with expected inputs. """
+ hosts = run_playbook_mock.call_args[0][0]
+ hosts_to_run_on = run_playbook_mock.call_args[0][1]
+ self.assertEquals(exp_hosts_len, len(hosts))
+ self.assertEquals(exp_hosts_to_run_on_len, len(hosts_to_run_on))
+
+ def _verify_config_hosts(self, written_config, host_count):
+ self.assertEquals(host_count, len(written_config['hosts']))
+ for host in written_config['hosts']:
+ self.assertTrue('hostname' in host)
+ self.assertTrue('public_hostname' in host)
+ if 'preconfigured' not in host:
+ self.assertTrue(host['node'])
+ self.assertTrue('ip' in host)
+ self.assertTrue('public_ip' in host)
+
+ #pylint: disable=too-many-arguments
+ def _verify_get_hosts_to_run_on(self, mock_facts, load_facts_mock,
+ run_playbook_mock, cli_input,
+ exp_hosts_len=None, exp_hosts_to_run_on_len=None,
+ force=None):
+ """
+ Tests cli_installer.py:get_hosts_to_run_on. That method has quite a
+ few subtle branches in the logic. The goal with this method is simply
+ to handle all the messy stuff here and allow the main test cases to be
+ easily read. The basic idea is to modify mock_facts to return a
+ version indicating OpenShift is already installed on particular hosts.
+ """
+ load_facts_mock.return_value = (mock_facts, 0)
+ run_playbook_mock.return_value = 0
+
+ if cli_input:
+ self.cli_args.append("install")
+ result = self.runner.invoke(cli.cli,
+ self.cli_args,
+ input=cli_input)
+ else:
+ config_file = self.write_config(
+ os.path.join(self.work_dir,
+ 'ooinstall.conf'), SAMPLE_CONFIG % 'openshift-enterprise')
+
+ self.cli_args.extend(["-c", config_file, "install"])
+ if force:
+ self.cli_args.append("--force")
+ result = self.runner.invoke(cli.cli, self.cli_args)
+ written_config = read_yaml(config_file)
+ self._verify_config_hosts(written_config, exp_hosts_len)
+
+ self.assert_result(result, 0)
+ self._verify_load_facts(load_facts_mock)
+ self._verify_run_playbook(run_playbook_mock, exp_hosts_len, exp_hosts_to_run_on_len)
+
+ # Make sure we ran on the expected masters and nodes:
+ hosts = run_playbook_mock.call_args[0][0]
+ hosts_to_run_on = run_playbook_mock.call_args[0][1]
+ self.assertEquals(exp_hosts_len, len(hosts))
+ self.assertEquals(exp_hosts_to_run_on_len, len(hosts_to_run_on))
+
+
+#pylint: disable=too-many-arguments,too-many-branches
+def build_input(ssh_user=None, hosts=None, variant_num=None,
+ add_nodes=None, confirm_facts=None, schedulable_masters_ok=None,
+ master_lb=None):
+ """
+ Build an input string simulating a user entering values in an interactive
+ attended install.
+
+ This is intended to give us one place to update when the CLI prompts change.
+ We should aim to keep this dependent on optional keyword arguments with
+ sensible defaults to keep things from getting too fragile.
+ """
+
+ inputs = [
+ 'y', # let's proceed
+ ]
+ if ssh_user:
+ inputs.append(ssh_user)
+
+ if variant_num:
+ inputs.append(str(variant_num)) # Choose variant + version
+
+ num_masters = 0
+ if hosts:
+ i = 0
+ for (host, is_master) in hosts:
+ inputs.append(host)
+ if is_master:
+ inputs.append('y')
+ num_masters += 1
+ else:
+ inputs.append('n')
+ #inputs.append('rpm')
+ # If we're currently at exactly 2 masters we have an invalid HA
+ # configuration, so the "add more hosts" question is not asked and
+ # the user must enter the next host directly:
+ if num_masters != 2:
+ if i < len(hosts) - 1:
+ if num_masters >= 1:
+ inputs.append('y') # Add more hosts
+ else:
+ inputs.append('n') # Done adding hosts
+ i += 1
+
+ # master_lb is a (host, answer) pair; the host element may instead be a list
+ # of hosts if you expect earlier entries to be rejected and re-prompted:
+ if master_lb:
+ if isinstance(master_lb[0], list) or isinstance(master_lb[0], tuple):
+ inputs.extend(master_lb[0])
+ else:
+ inputs.append(master_lb[0])
+ inputs.append('y' if master_lb[1] else 'n')
+
+ # TODO: support option 2, fresh install
+ if add_nodes:
+ if schedulable_masters_ok:
+ inputs.append('y')
+ inputs.append('1') # Add more nodes
+ i = 0
+ for (host, is_master) in add_nodes:
+ inputs.append(host)
+ #inputs.append('rpm')
+ if i < len(add_nodes) - 1:
+ inputs.append('y') # Add more hosts
+ else:
+ inputs.append('n') # Done adding hosts
+ i += 1
+
+ if add_nodes is None:
+ total_hosts = hosts
+ else:
+ total_hosts = hosts + add_nodes
+ if total_hosts is not None and num_masters == len(total_hosts):
+ inputs.append('y')
+
+ inputs.extend([
+ confirm_facts,
+ 'y', # lets do this
+ ])
+
+ return '\n'.join(inputs)
+
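build_input simply joins one answer per interactive prompt with newlines; the attended tests above feed the resulting string to click's CliRunner via the input= argument. A condensed usage sketch:

from test.fixture import build_input

# One master and one dedicated node, first variant, accept the gathered facts:
cli_input = build_input(hosts=[('10.0.0.1', True),
                               ('10.0.0.2', False)],
                        ssh_user='root',
                        variant_num=1,
                        confirm_facts='y')

# Each element answers one prompt, in the order the CLI asks them:
print cli_input.split('\n')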
diff --git a/utils/test/oo_config_tests.py b/utils/test/oo_config_tests.py
index 0dd4a30e9..9f5f8e244 100644
--- a/utils/test/oo_config_tests.py
+++ b/utils/test/oo_config_tests.py
@@ -73,6 +73,29 @@ hosts:
node: true
"""
+CONFIG_BAD = """
+variant: openshift-enterprise
+ansible_ssh_user: root
+hosts:
+ - connect_to: master-private.example.com
+ ip: 10.0.0.1
+ hostname: master-private.example.com
+ public_ip: 24.222.0.1
+ public_hostname: master.example.com
+ master: true
+ node: true
+ - ip: 10.0.0.2
+ hostname: node1-private.example.com
+ public_ip: 24.222.0.2
+ public_hostname: node1.example.com
+ node: true
+ - connect_to: node2-private.example.com
+ ip: 10.0.0.3
+ hostname: node2-private.example.com
+ public_ip: 24.222.0.3
+ public_hostname: node2.example.com
+ node: true
+"""
class OOInstallFixture(unittest.TestCase):
@@ -161,6 +184,17 @@ class OOConfigTests(OOInstallFixture):
self.assertEquals('openshift-enterprise', ooconfig.settings['variant'])
self.assertEquals('v1', ooconfig.settings['version'])
+ def test_load_bad_config(self):
+
+ cfg_path = self.write_config(os.path.join(self.work_dir,
+ 'ooinstall.conf'), CONFIG_BAD)
+ try:
+ OOConfig(cfg_path)
+ assert False
+ except OOConfigInvalidHostError:
+ assert True
+
+
def test_load_complete_facts(self):
cfg_path = self.write_config(os.path.join(self.work_dir,
'ooinstall.conf'), SAMPLE_CONFIG)